#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：V2 
@File    ：T1.py
@IDE     ：PyCharm 
@Author  ：郭星
@Date    ：2025/10/7 22:19 
'''
# reproduce_experiment.py
"""
Simplified reproduction of the paper's experimental setup.
- Discrete-event simulation using simpy to emulate 3 production lines,
  each with 9 machines and 8 buffers.
- Simple machine failure/repair modeling using exponential MTBF/MTTR.
- Three experimental schemes:
    1) Shared WIP + simplified C-MOHFA style optimization
    2) Baseline Firefly-like algorithm with random transfer rates
    3) C-MOHFA style but with no sharing (transfer rates = 0)
Notes:
- This is an approximation: the paper used PlantSimulation+MATLAB.
- Parameters (MTBF/MTTR, rate bounds, costs, revenues) are taken from the paper.
References: see the uploaded file (paper) for original tables and settings.
"""
import random
import math
import simpy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from collections import deque, defaultdict
import time

# Shared random source; fixed seed makes every run of the experiment reproducible.
RNG = np.random.RandomState(42)

# -----------------------
# Parameters (from paper)
# -----------------------
NUM_LINES = 3
NUM_MACHINES = 9
NUM_BUFFERS = 8  # buffers between machines
PRODUCT_TYPES = ['A','B','C','D','E']
PRODUCT_REVENUE = {'A':4.0,'B':4.5,'C':4.0,'D':5.0,'E':3.5}  # table4
# Production capacity ranges (units/min) - Tab.2, simplified to average
CAPACITY_BOUNDS = {
    # For each line L1,L2,L3, for M1..M9 provide [low,high]
    0: [[15,20],[8,13],[16,23],[8,14],[13,21],[17,21],[12,15],[9,13],[10,22]],
    1: [[15,18],[15,22],[9,12],[15,23],[8,13],[12,15],[17,23],[10,20],[9,18]],
    2: [[17,22],[12,16],[15,18],[12,13],[14,23],[10,14],[8,12],[14,18],[10,20]]
}
# MTBF (h) and MTTR (min) table simplified: we'll use mean MTBF hours and MTTR minutes for each machine/line from table1
# For brevity we'll use representative averages per machine index across 3 lines (the doc provides full table; here simplified)
MTBF_hours = [
    [280,270,282,288,296,270,334,318,248],  # L1
    [264,294,262,280,279,310,284,302,268],  # L2
    [278,285,326,295,269,284,317,297,260]   # L3
]
MTTR_min = [
    [28,26,28,28.5,29,26.5,32,27,24],
    [26,28.5,27,28,27,30,28,29.5,25],
    [28,27,30,27.5,28,30,31,28.5,27]
]
# Transfer cost table (Tab.3): mapping (fromLine, bufferIndex) -> cost to transfer one unit to other lines
TRANSFER_COST = {
    # Only providing non-empty entries from the paper
    # Format: (line, buffer_idx): {other_line: cost}
    (0,1): {1:5}, (0,3): {1:5}, (0,5): {1:5}, (0,6): {1:6},
    (1,1): {0:5}, (1,2): {2:4}, (1,3): {0:5}, (1,4): {2:4}, (1,5): {0:5},
    (2,1): {1:5}, (2,2): {1:4}, (2,4): {1:4}, (2,6): {0:6}
    # Note: This is a simplification/partial mapping for demonstration.
}
# Paper experimental settings
MAX_C_default = 1500  # default total buffer capacity to distribute over all buffers
POP = 60  # firefly population size
INIT_RATIO = (0.3,0.3,0.4)  # mix of the three population-initialization strategies
# NOTE(review): Er, ATTR0, G, K_UPPER and K_LOWER are taken from the paper's
# C-MOHFA settings but are not referenced by the simplified optimizer below.
Er = 0.15
ATTR0 = 1.0
G = 1.0
K_UPPER = 0.15
K_LOWER = 0.05
SA_T0 = 99  # simulated-annealing start temperature
SA_DECAY = 0.01  # multiplicative cooling factor applied per SA step
ITER_LIMIT = 80  # simplified
SIM_DAYS = 1  # number of days to average - we scale simulation time to minutes
SIM_H_PER_DAY = 24
# In paper simulation stats: 30×15h then take daily average; here simplify: run long enough for steady-state
SIM_TIME_MIN = 30 * 15 * 60  # paper used 30*15h; this is a long sim; for speed we scale down in tests

# To keep runtime reasonable for demonstration, use smaller sim time by default:
SIM_TIME_MIN = 8 * 60  # 8 hours simulation in minutes for quick runs (deliberately overrides the value above)

# -----------------------
# Helper functions
# -----------------------
def sample_processing_rate(line_idx, mach_idx):
    """Draw a processing rate (units/min) uniformly from the machine's capacity range."""
    bounds = CAPACITY_BOUNDS[line_idx][mach_idx]
    return RNG.uniform(bounds[0], bounds[1])

def exp_time_from_rate(rate):
    """Mean processing time (minutes per unit) for a machine running at `rate` units/min."""
    return 1 / rate

def sample_failure_interval(line_idx, mach_idx):
    """Draw an exponential time-to-failure (minutes) from the machine's MTBF."""
    mean_minutes = 60.0 * MTBF_hours[line_idx][mach_idx]  # table stores MTBF in hours
    return RNG.exponential(mean_minutes)

def sample_repair_time(line_idx, mach_idx):
    """Draw an exponential repair duration (minutes) from the machine's MTTR."""
    mean_repair = MTTR_min[line_idx][mach_idx]
    return RNG.exponential(mean_repair)

# -----------------------
# SimPy Model
# -----------------------
class Machine:
    """Passive state holder for one machine.

    Item processing is NOT driven by this class: `run` is only an idle
    placeholder loop, and the actual per-item work is simulated externally
    by the line logic (see FactorySim).
    """

    def __init__(self, env, name, line_idx, mach_idx, capacity_rate, mtbf_h, mttr_min):
        # Identity / position in the plant.
        self.env = env
        self.name = name
        self.line_idx = line_idx
        self.mach_idx = mach_idx
        # Nominal performance and reliability parameters.
        self.rate = capacity_rate  # units/min
        self.mtbf_h = mtbf_h
        self.mttr_min = mttr_min
        # Bookkeeping fields for utilization statistics.
        self.busy = False
        self.total_busy_time = 0.0
        self.last_start = None
        self.operational = True
        # Register the placeholder loop with the simulation environment.
        self.process = env.process(self.run())

    def run(self):
        # Idle loop: wake once per simulated time unit and do nothing;
        # real work is triggered externally by the line logic.
        while True:
            yield self.env.timeout(1)

class Buffer:
    """Bounded FIFO buffer placed between two consecutive machines."""

    def __init__(self, capacity):
        self.capacity = capacity  # maximum number of items held
        self.storage = deque()    # FIFO: append right, pop left

    def put(self, item):
        """Append `item` if there is room; return True on success, False when full."""
        if self.is_full():
            return False
        self.storage.append(item)
        return True

    def get(self):
        """Pop and return the oldest item, or None when the buffer is empty."""
        return self.storage.popleft() if self.storage else None

    def is_full(self):
        return len(self.storage) >= self.capacity

    def is_empty(self):
        return not self.storage

    def level(self):
        """Current number of stored items."""
        return len(self.storage)

# For simplicity, we model each line as a sequence of machines with buffers between them.
# Items flow through; failures are treated as random interruptions causing machine to be down for MTTR sample.
class FactorySim:
    """Discrete-event model of the plant: NUM_LINES serial lines, each with
    NUM_MACHINES machines separated by NUM_BUFFERS finite buffers.

    Machines are represented as plain state dicts (the Machine class above is
    not used here).  Failures/repairs are scheduled by `failure_manager`; each
    line's item flow is driven by one `line_process` generator.  Call
    `report()` after `env.run(...)` to collect the run's statistics.
    """

    def __init__(self, env, transfer_rule, buffer_caps_per_line, transfer_rates):
        """
        transfer_rule: bool, whether allow transfer between lines when one buffer full and other empty
        buffer_caps_per_line: list of list: [line][buffer_idx] -> capacity (int)
        transfer_rates: dict mapping (line, buffer_idx) -> fraction [0,1] (max share to transfer)
        """
        self.env = env
        self.transfer_rule = transfer_rule
        self.buffer_caps = buffer_caps_per_line
        self.transfer_rates = transfer_rates
        # build lines
        self.lines = []  # NOTE(review): never populated below; kept for compatibility
        self.buffers = {}  # (line, buffer_idx) -> Buffer
        self.machines = {}  # (line, mach_idx) -> dict of status
        # statistics
        self.completed = []  # list of (time, line, product, revenue)
        self.machine_busy_time = defaultdict(float)  # NOTE(review): declared but never updated
        self.machine_work_start = {}  # NOTE(review): declared but never updated
        self.transfer_count = 0  # number of cross-line transfers performed
        self.transfer_cost = 0.0  # accumulated cost of those transfers
        self.processed_count = 0  # total machine operations finished (all machines, all lines)

        # initialize structures
        for li in range(NUM_LINES):
            # set buffers between machines: buffer i is after machine i (i=0..7 for M1..M8)
            line_buffers = []
            for b in range(NUM_BUFFERS):
                cap = max(1, int(buffer_caps_per_line[li][b]))  # clamp so every buffer holds at least one item
                buf = Buffer(capacity=cap)
                self.buffers[(li,b)] = buf
                line_buffers.append(buf)
            # machines
            line_machs = {}
            for m in range(NUM_MACHINES):
                rate = sample_processing_rate(li, m)
                # we won't create Machine active processes; will simulate service time per item
                line_machs[m] = {
                    'rate': rate,  # sampled capacity, units/min
                    'up': True,  # False while the machine is under repair
                    'next_fail': self.env.now + sample_failure_interval(li,m),  # absolute time of next failure
                    'repair_until': None,  # absolute time the current repair finishes, or None
                    'mtbf_h': MTBF_hours[li][m],
                    'mttr_min': MTTR_min[li][m]
                }
            self.machines[li] = line_machs

        # schedule failures events
        self.env.process(self.failure_manager())

        # start feeders: random product arrivals at line head (paper: first machine never starves)
        for li in range(NUM_LINES):
            self.env.process(self.line_process(li))

    def failure_manager(self):
        """SimPy process: repeatedly wait for the soonest scheduled machine
        failure, mark that machine down, and schedule its restoration.

        NOTE(review): while sleeping until `soonest_time`, a machine restored
        during the wait may receive an earlier `next_fail`; that failure is
        only noticed on the next loop pass. Acceptable for this approximation.
        """
        # periodic check for failures (sample exponential intervals)
        while True:
            # find soonest failure among machines
            soonest_time = None
            soonest = None
            for li in range(NUM_LINES):
                for mi in range(NUM_MACHINES):
                    st = self.machines[li][mi]
                    if st['next_fail'] is not None:
                        if soonest_time is None or st['next_fail'] < soonest_time:
                            soonest_time = st['next_fail']
                            soonest = (li,mi)
            if soonest_time is None:
                # no failure scheduled anywhere; poll again after one simulated minute
                yield self.env.timeout(1.0)
            else:
                wait = max(0.0, soonest_time - self.env.now)
                yield self.env.timeout(wait)
                li,mi = soonest
                st = self.machines[li][mi]
                # if already under repair skip
                if not st['up']:
                    st['next_fail'] = self.env.now + sample_failure_interval(li,mi)
                    continue
                # machine fails
                st['up'] = False
                repair_time = sample_repair_time(li,mi)
                st['repair_until'] = self.env.now + repair_time
                # schedule restoration
                self.env.process(self._restore_machine(li,mi, repair_time))

    def _restore_machine(self, li, mi, repair_time):
        """SimPy process: bring machine (li, mi) back up after `repair_time`
        minutes and draw its next failure time."""
        yield self.env.timeout(repair_time)
        st = self.machines[li][mi]
        st['up'] = True
        st['next_fail'] = self.env.now + sample_failure_interval(li,mi)
        st['repair_until'] = None

    def line_process(self, line_idx):
        """Continuously try to process items through the line.

        Each loop pass moves one item sequentially through M1..M9: wait while
        the machine is down, sample an exponential service time, then push the
        item into the downstream buffer (or, after the last machine, record the
        completed product and its revenue).
        """
        # initial fill: to avoid starving first machine, we put infinite source -> directly processed when ready
        # We'll handle processing as sequential steps: M1..M9. After Mi finishes, it attempts to put item into buffer i (if i<9),
        # if buffer full, it blocks (simulated by waiting).
        while True:
            # create a new item at head (paper: initial supply sufficient; assign random product type)
            prod = RNG.choice(PRODUCT_TYPES)
            # process through machines
            blocked = False  # NOTE(review): assigned but never read
            for m in range(NUM_MACHINES):
                st = self.machines[line_idx][m]
                # if machine down, wait until repair
                while not st['up']:
                    yield self.env.timeout(1.0)
                # processing time sample: use exponential around mean service time (1/rate)
                mean_service = exp_time_from_rate(st['rate'])
                service_time = RNG.exponential(mean_service)
                # simulate processing
                start = self.env.now  # NOTE(review): unused; per-machine busy-time stats never implemented
                yield self.env.timeout(service_time)
                self.processed_count += 1
                # finish: if m < 8 push into buffer m
                if m < NUM_MACHINES - 1:
                    buf = self.buffers[(line_idx, m)]
                    # if buffer full -> check transfer rules: may attempt transfer to other lines
                    if buf.is_full():
                        # attempt cross-line transfer if enabled
                        transferred = False
                        if self.transfer_rule:
                            # look for lines j != line_idx where same buffer index is empty and allowed by transfer_rate
                            for other in range(NUM_LINES):
                                if other == line_idx: continue
                                other_buf = self.buffers[(other, m)]
                                if other_buf.is_empty():
                                    # check transfer rate allowed
                                    tr_key = (line_idx, m)
                                    tr_rate = self.transfer_rates.get(tr_key, 0.0)
                                    # implement a probabilistic transfer according to tr_rate
                                    if tr_rate > 0 and RNG.rand() < tr_rate:
                                        # perform transfer: cost depends on mapping
                                        # Here we model transferring 1 unit
                                        cost_map = TRANSFER_COST.get((line_idx, m), {})
                                        cost = cost_map.get(other, 0.0)  # pairs missing from Tab.3 transfer for free
                                        self.transfer_cost += cost
                                        self.transfer_count += 1
                                        # deliver to other buffer
                                        other_buf.put((prod, self.env.now))
                                        transferred = True
                                        break
                        if not transferred:
                            # block until buffer has space; we model blocking by waiting a unit time and rechecking
                            while buf.is_full():
                                yield self.env.timeout(0.5)
                            buf.put((prod, self.env.now))
                    else:
                        buf.put((prod, self.env.now))
                else:
                    # final machine: completed product leaves system
                    revenue = PRODUCT_REVENUE[prod]
                    self.completed.append((self.env.now, line_idx, prod, revenue))
            # small delay between new items to avoid infinite tight loop (model throughput via service rates)
            yield self.env.timeout(0.0001)

    def report(self):
        """Summarize the run: revenue, completions, transfer stats, rough
        utilization, and per-line throughput (completions per minute).

        NOTE(review): divides by env.now, so this must be called after the
        simulation has advanced past t=0.
        """
        total_rev = sum(r for (_,_,_,r) in self.completed)
        total_completed = len(self.completed)
        # estimate utilization approximate: processed_count / (sum of capacity * sim_time)
        sim_minutes = self.env.now
        # compute avg nominal capacity (sum rates)
        total_nominal_cap = 0.0
        for li in range(NUM_LINES):
            for mi in range(NUM_MACHINES):
                total_nominal_cap += self.machines[li][mi]['rate']
        # nominal possible processed units if always busy = total_nominal_cap * sim_minutes
        nominal_units = total_nominal_cap * sim_minutes
        utilization = (self.processed_count / nominal_units) if nominal_units > 0 else 0.0
        # throughput per line (completed per minute)
        q_per_line = defaultdict(float)
        for t,li,prod,r in self.completed:
            q_per_line[li] += 1.0
        q_line = {li: q_per_line[li] / sim_minutes for li in range(NUM_LINES)}
        avg_throughput = np.mean(list(q_line.values()))
        return {
            'revenue': total_rev,
            'completed': total_completed,
            'transfer_count': self.transfer_count,
            'transfer_cost': self.transfer_cost,
            'utilization': utilization,
            'avg_throughput': avg_throughput,
            'q_line': q_line
        }

# -----------------------
# Optimization (simplified FA + SA)
# -----------------------
def random_buffer_allocation(maxC, num_lines=NUM_LINES, num_buffers=NUM_BUFFERS):
    """Return integer buffer capacities [line][buffer_idx] summing to maxC.

    Starts from an even split across all buffers, then scatters the leftover
    units one at a time onto uniformly random (line, buffer) cells.
    """
    per_buffer = maxC // (num_lines * num_buffers)
    caps = [[per_buffer] * num_buffers for _ in range(num_lines)]
    leftover = maxC - per_buffer * num_lines * num_buffers
    for _ in range(leftover):
        # line index is drawn before buffer index (matters for RNG reproducibility)
        caps[RNG.randint(0, num_lines)][RNG.randint(0, num_buffers)] += 1
    return caps

def random_transfer_rates():
    """Draw a random transfer rate in [0, 0.5) for every (line, buffer) pair."""
    return {
        (line, buf): float(RNG.rand() * 0.5)  # cap at 0.5 for stability
        for line in range(NUM_LINES)
        for buf in range(NUM_BUFFERS)
    }

def evaluate_solution(buffer_caps, transfer_rates, transfer_rule=True, sim_time=SIM_TIME_MIN, runs=3):
    """Run multiple simulation replications and average numeric results safely.

    buffer_caps: [line][buffer_idx] -> capacity, passed to FactorySim
    transfer_rates: {(line, buffer_idx): rate}, passed to FactorySim
    transfer_rule: whether cross-line transfers are enabled during simulation
    sim_time: simulated minutes per replication
    runs: number of independent replications to average

    Returns a dict of metric means; the dict-valued 'q_line' metric is
    averaged per line index rather than through pandas' numeric mean.
    """
    metrics = []
    for r in range(runs):
        env = simpy.Environment()
        sim = FactorySim(env, transfer_rule, buffer_caps, transfer_rates)
        env.run(until=sim_time)
        metrics.append(sim.report())

    df = pd.DataFrame(metrics)

    # Average only the numeric columns (dict-valued columns would break .mean()).
    numeric_df = df.select_dtypes(include=[np.number])
    mean = numeric_df.mean().to_dict()

    # Handle the dict-valued 'q_line' column separately: average per line index.
    if 'q_line' in df.columns:
        q_lines = [x for x in df['q_line'] if isinstance(x, dict)]
        if q_lines:
            q_df = pd.DataFrame(q_lines)
            mean['q_line'] = q_df.mean().to_dict()

    return mean


def simplified_fa_optimize(maxC=MAX_C_default, iters=ITER_LIMIT, pop=POP, scheme='shared'):
    """
    Simplified Firefly-like optimizer:
    - Initialize population of buffer allocations (three init strategies)
    - For each candidate, do a quick SA-based local search on transfer rates
    - Evaluate with simulation, use 'brightness' = revenue (primary) and throughput (secondary)
    - Move population towards better solutions by replacing worse with mutated variants
    This is a heavily simplified approximation of C-MOHFA but captures the two-level search.

    maxC: total buffer capacity distributed over all lines/buffers
    iters: number of replace-worse-with-mutant iterations
    pop: population size
    scheme: 'shared' enables cross-line transfers and random initial transfer
        rates; any other value ('baseline', 'no-share') disables the transfer
        rule in simulation and starts all transfer rates at zero.

    Returns {'caps': best allocation, 'tr': best transfer rates,
             'metrics': multi-replication evaluation of that solution}.
    """
    # initialize population
    pop_caps = []
    pop_tr = []
    # three init strategies mix; n3 absorbs rounding so n1+n2+n3 == pop
    n1 = int(pop * INIT_RATIO[0])
    n2 = int(pop * INIT_RATIO[1])
    n3 = pop - n1 - n2
    # strategy 1: uniform random allocation
    for _ in range(n1):
        pop_caps.append(random_buffer_allocation(maxC))
    for _ in range(n2):
        # heuristic: allocate proportionally to upstream capacity sums
        caps = [[0]*NUM_BUFFERS for _ in range(NUM_LINES)]
        total_weights = 0.0
        weights = []
        for li in range(NUM_LINES):
            # weight of a line = sum of all its machines' [low, high] bounds
            w = sum(sum(b) for b in CAPACITY_BOUNDS[li])
            weights.append(w)
            total_weights += w
        # distribute maxC by weights across lines and buffers evenly
        for li in range(NUM_LINES):
            for b in range(NUM_BUFFERS):
                caps[li][b] = int((weights[li]/total_weights) * maxC / NUM_BUFFERS)
        # correct remainder
        s = sum(sum(row) for row in caps)
        rem = maxC - s
        while rem>0:
            i = RNG.randint(0, NUM_LINES)
            j = RNG.randint(0, NUM_BUFFERS)
            caps[i][j]+=1; rem-=1
        pop_caps.append(caps)
    for _ in range(n3):
        # production-rate-based initialization
        caps = [[0]*NUM_BUFFERS for _ in range(NUM_LINES)]
        for li in range(NUM_LINES):
            # use average upstream rates
            for b in range(NUM_BUFFERS):
                avg_rate = np.mean([ (low+high)/2 for (low,high) in CAPACITY_BOUNDS[li] ])
                caps[li][b] = int(avg_rate)  # rough
        s = sum(sum(row) for row in caps)
        # scale to maxC
        scale = maxC / s if s>0 else 1.0
        for li in range(NUM_LINES):
            for b in range(NUM_BUFFERS):
                caps[li][b] = max(1, int(caps[li][b] * scale))
        # fix remainder
        s = sum(sum(row) for row in caps)
        rem = maxC - s
        while rem>0:
            i = RNG.randint(0, NUM_LINES)
            j = RNG.randint(0, NUM_BUFFERS)
            caps[i][j]+=1; rem-=1
        pop_caps.append(caps)

    # initial transfer rates
    for _ in range(pop):
        if scheme == 'shared':
            pop_tr.append(random_transfer_rates())
        else:
            # if scheme is 'no-share' use zero rates
            pop_tr.append({(li,b):0.0 for li in range(NUM_LINES) for b in range(NUM_BUFFERS)})

    # evaluate initial population (quick low-run eval)
    pop_scores = []
    for i in range(pop):
        m = evaluate_solution(pop_caps[i], pop_tr[i], transfer_rule=(scheme=='shared'), sim_time=SIM_TIME_MIN, runs=1)
        # brightness: tuple (revenue, throughput) - we maximize revenue primarily
        pop_scores.append((m['revenue'], m['avg_throughput'], m))

    # `best` is tracked only for progress printing; the returned solution is
    # re-derived from pop_scores after the loop.
    best = max(pop_scores, key=lambda x: (x[0], x[1]))

    # main iterations: simple replace-worse-with-mutant
    for it in range(iters):
        # pick two random individuals i,j (an iteration is skipped when i == j)
        i = RNG.randint(0,pop)
        j = RNG.randint(0,pop)
        if i==j: continue
        # create mutant of j: mutate buffer capacities slightly and local-search transfer rates via SA
        new_caps = [row[:] for row in pop_caps[j]]  # deep-ish copy
        # mutate: move some capacity units between buffers (total capacity is conserved)
        for _m in range(int(max(1, maxC*0.01))):
            a_line = RNG.randint(0, NUM_LINES)
            a_buf = RNG.randint(0, NUM_BUFFERS)
            b_line = RNG.randint(0, NUM_LINES)
            b_buf = RNG.randint(0, NUM_BUFFERS)
            if new_caps[a_line][a_buf] > 1:
                new_caps[a_line][a_buf] -= 1
                new_caps[b_line][b_buf] += 1
        # local SA on transfer rates
        candidate_tr = dict(pop_tr[j])
        T = SA_T0
        for _sa in range(6):  # few SA steps
            # neighbor: tweak some transfer rates (Gaussian step, clamped to [0, 1])
            nbr = dict(candidate_tr)
            for _k in range(5):
                li = RNG.randint(0,NUM_LINES)
                b = RNG.randint(0,NUM_BUFFERS)
                key = (li,b)
                nbr[key] = min(1.0, max(0.0, nbr.get(key,0.0) + RNG.normal(0,0.05)))
            # evaluate nbr
            res_nbr = evaluate_solution(new_caps, nbr, transfer_rule=(scheme=='shared'), sim_time=SIM_TIME_MIN, runs=1)
            res_cur = evaluate_solution(new_caps, candidate_tr, transfer_rule=(scheme=='shared'), sim_time=SIM_TIME_MIN, runs=1)
            # acceptance: always take improvements; accept worse moves with
            # Metropolis probability exp(delta/T) (delta > 0 short-circuits,
            # so exp() only sees non-positive deltas)
            delta = res_nbr['revenue'] - res_cur['revenue']
            if delta > 0 or math.exp(delta / max(1e-9, T)) > RNG.rand():
                candidate_tr = nbr
            T = T * (1 - SA_DECAY)
        # evaluate new solution
        new_metric = evaluate_solution(new_caps, candidate_tr, transfer_rule=(scheme=='shared'), sim_time=SIM_TIME_MIN, runs=1)
        # compare with worst of (i,j). If new better than worst, replace
        worse_idx = i if (pop_scores[i][0], pop_scores[i][1]) < (pop_scores[j][0], pop_scores[j][1]) else j
        if (new_metric['revenue'], new_metric['avg_throughput']) > (pop_scores[worse_idx][0], pop_scores[worse_idx][1]):
            pop_caps[worse_idx] = new_caps
            pop_tr[worse_idx] = candidate_tr
            pop_scores[worse_idx] = (new_metric['revenue'], new_metric['avg_throughput'], new_metric)
            # update best
            best = max(pop_scores, key=lambda x: (x[0], x[1]))
        # optionally print progress
        if it % 10 == 0:
            print(f"Iter {it}: current best revenue={best[0]:.1f}, throughput={best[1]:.4f}")
    # final evaluation: run best solution multi-run
    best_idx = max(range(pop), key=lambda idx: (pop_scores[idx][0], pop_scores[idx][1]))
    best_caps = pop_caps[best_idx]
    best_tr = pop_tr[best_idx]
    final_metrics = evaluate_solution(best_caps, best_tr, transfer_rule=(scheme=='shared'), sim_time=SIM_TIME_MIN, runs=3)
    return {'caps': best_caps, 'tr': best_tr, 'metrics': final_metrics}

# -----------------------
# Quick runner to reproduce three schemes
# -----------------------
def run_three_schemes(maxC=1500):
    """Run the three experimental schemes in sequence and return their results
    as a (scheme1, scheme2, scheme3) tuple."""
    specs = (
        ("scheme 1 (Shared WIP + simplified C-MOHFA)", "Scheme1", 'shared'),
        ("scheme 2 (Baseline FA)", "Scheme2", 'baseline'),
        ("scheme 3 (C-MOHFA but no sharing)", "Scheme3", 'no-share'),
    )
    results = []
    for label, tag, scheme in specs:
        print("Running " + label + "...")
        res = simplified_fa_optimize(maxC=maxC, iters=40, pop=30, scheme=scheme)
        print(tag + " result:", res['metrics'])
        results.append(res)
    return tuple(results)

if __name__ == "__main__":
    t0 = time.time()
    r1, r2, r3 = run_three_schemes(maxC=300)  # for quick demo use smaller maxC
    t1 = time.time()
    print("Total time:", t1 - t0)
