#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project : V2
@File    : T3.py
@IDE     : PyCharm
@Author  : Guo Xing
@Date    : 2025/9/9 19:40
Genetic-programming search for composite dispatching rules.
'''
# (duplicate shebang / coding declarations removed; see the file header)
"""
Genetic Programming for Composite Dispatching Rules on FJSP
- Individual = expression tree (priority function)
- Terminals: features from candidate (j,k,m) & state (pt, jr, mr, rem_work, rem_ops, due, slack, now, const)
- Function set: +, -, *, protected_div, min, max, abs, neg
- Fitness: makespan from a full greedy schedule using the rule (lower is better)
- Seeding: include classic rules (SPT/LPT/FIFO/EDD/CR/MWKR/LWKR/MOPNR) as initial individuals
- Elitism + tournament selection + subtree crossover + subtree mutation
- Plot: best makespan per generation (convergence curve)
"""

import math
import random
from dataclasses import dataclass
from typing import Dict, List, Tuple, Callable, Any, Optional
import numpy as np
import matplotlib.pyplot as plt

# ------------------------------
# Problem Definition (FJSP)
# ------------------------------

@dataclass
class Operation:
    """One operation of a job; runnable on any machine listed in `machine_time`."""
    # map: machine_id -> processing_time
    machine_time: Dict[int, int]

@dataclass
class Job:
    """A job: an ordered sequence of operations plus a due date."""
    ops: List[Operation]
    due: float  # due date for job-level features (EDD/CR/slack)

@dataclass
class Problem:
    """A flexible job-shop instance: the jobs plus the machine count."""
    jobs: List[Job]
    m: int  # number of machines

    @property
    def n_jobs(self):
        """Number of jobs in the instance."""
        return len(self.jobs)

    @property
    def n_ops_total(self):
        """Total operation count across all jobs."""
        total = 0
        for job in self.jobs:
            total += len(job.ops)
        return total

    def max_ops_per_job(self):
        """Length of the longest job (0 for an empty instance)."""
        if not self.jobs:
            return 0
        return max(len(job.ops) for job in self.jobs)


def build_demo_problem(n_jobs: int = 12, m: int = 5, ops_per_job: int = 4, seed: int = 7) -> Problem:
    """
    Build a random FJSP instance.

    Each operation is executable on 1-2 machines with processing times in 1..9.
    A job's due date is the sum of its minimal operation times plus a random
    slack in 5..20.
    """
    rnd = random.Random(seed)
    job_list: List[Job] = []
    for _ in range(n_jobs):
        op_list: List[Operation] = []
        min_time_sum = 0
        for _ in range(ops_per_job):
            n_eligible = 2 if m >= 2 else 1
            eligible = sorted(rnd.sample(range(m), k=n_eligible))
            times = {mach: rnd.randint(1, 9) for mach in eligible}
            op_list.append(Operation(times))
            min_time_sum += min(times.values())
        slack = rnd.randint(5, 20)
        job_list.append(Job(ops=op_list, due=float(min_time_sum + slack)))
    return Problem(jobs=job_list, m=m)

# ------------------------------
# GP Expression Tree
# ------------------------------

def pdiv(a: float, b: float) -> float:
    """Protected division: the denominator is shifted away from zero by 1e-6."""
    return a / (abs(b) + 1e-6)

# Primitive set for GP trees: name -> (arity, callable).
# NOTE: insertion order matters for RNG reproducibility (random_function
# draws from list(FUNCS.keys())).
FUNCS: Dict[str, Tuple[int, Callable[..., float]]] = {
    "add": (2, lambda a, b: a + b),
    "sub": (2, lambda a, b: a - b),
    "mul": (2, lambda a, b: a * b),
    "div": (2, pdiv),  # protected division (never raises ZeroDivisionError)
    "min": (2, lambda a, b: a if a < b else b),
    "max": (2, lambda a, b: a if a > b else b),
    "abs": (1, lambda a: abs(a)),
    "neg": (1, lambda a: -a),
}

# Terminal (feature) keys available to a rule at each decision step:
#   pt       - processing time of op k of job j on machine m
#   jr       - job ready time (when job j may start its next op)
#   mr       - machine ready time of machine m
#   now      - earliest candidate start among the current step's candidates
#   rem_work - sum of minimal processing times of job j's remaining ops
#   rem_ops  - number of remaining ops of job j
#   due      - job due date
#   slack    - due minus the candidate's start time
TERMS_KEYS = ["pt", "jr", "mr", "now", "rem_work", "rem_ops", "due", "slack"]

class Node:
    """Abstract base for GP expression-tree nodes (priority functions).

    Subclasses (FuncNode, TermNode) must implement evaluation against a
    feature context, deep copy, size/depth metrics, and a readable string form.
    """
    def eval(self, ctx: Dict[str, float]) -> float:
        # Evaluate this subtree against a feature context (keys: TERMS_KEYS).
        raise NotImplementedError
    def copy(self) -> "Node":
        # Return a deep copy of this subtree.
        raise NotImplementedError
    def size(self) -> int:
        # Number of nodes in this subtree.
        raise NotImplementedError
    def depth(self) -> int:
        # Height of this subtree (a leaf has depth 1).
        raise NotImplementedError
    def __str__(self) -> str:
        raise NotImplementedError

class FuncNode(Node):
    """Internal tree node: applies a primitive from FUNCS to its children."""

    def __init__(self, name: str, children: List[Node]):
        self.name = name
        self.arity, self.fn = FUNCS[name]
        self.children = children
        assert len(children) == self.arity

    def eval(self, ctx: Dict[str, float]) -> float:
        # Evaluate children first, then apply the primitive.
        args = [child.eval(ctx) for child in self.children]
        return float(self.fn(*args))

    def copy(self) -> "FuncNode":
        cloned = [child.copy() for child in self.children]
        return FuncNode(self.name, cloned)

    def size(self) -> int:
        total = 1
        for child in self.children:
            total += child.size()
        return total

    def depth(self) -> int:
        return 1 + max(child.depth() for child in self.children)

    def __str__(self) -> str:
        if self.arity == 1:
            return f"{self.name}({self.children[0]})"
        return f"({self.name} {self.children[0]} {self.children[1]})"

class TermNode(Node):
    """Leaf node: either a named feature lookup or an ephemeral constant."""

    def __init__(self, key: Optional[str] = None, const: Optional[float] = None):
        # Exactly one of `key` / `const` must be provided.
        assert (key is not None) ^ (const is not None)
        self.key, self.const = key, const

    def eval(self, ctx: Dict[str, float]) -> float:
        return float(ctx[self.key]) if self.key is not None else float(self.const)

    def copy(self) -> "TermNode":
        return TermNode(self.key, self.const)

    def size(self) -> int:
        # BUGFIX: this override was missing; Node.size() raises
        # NotImplementedError, so size() on any tree containing a leaf
        # (i.e. every tree) crashed via FuncNode.size().
        return 1

    def depth(self) -> int:
        return 1

    def __str__(self) -> str:
        return self.key if self.key is not None else f"{self.const:.3f}"


def random_terminal(rng: random.Random) -> Node:
    """Draw a random leaf: 20% an ephemeral random constant, else a feature terminal."""
    if rng.random() < 0.2:
        # ERC (ephemeral random constant) in [-2, 2]
        return TermNode(const=rng.uniform(-2.0, 2.0))
    return TermNode(key=rng.choice(TERMS_KEYS))

def random_function(rng: random.Random) -> str:
    """Pick a primitive name uniformly from the function set."""
    names = list(FUNCS.keys())
    return rng.choice(names)

def grow_random_tree(rng: random.Random, max_depth: int) -> Node:
    """'Grow' initialization: terminals may appear at any depth (30% chance early)."""
    make_leaf = max_depth <= 1 or rng.random() < 0.3
    if make_leaf:
        return random_terminal(rng)
    fname = random_function(rng)
    arity = FUNCS[fname][0]
    children = [grow_random_tree(rng, max_depth - 1) for _ in range(arity)]
    return FuncNode(fname, children)

def full_random_tree(rng: random.Random, max_depth: int) -> Node:
    """'Full' initialization: functions at every level, terminals only at the bottom."""
    if max_depth <= 1:
        return random_terminal(rng)
    fname = random_function(rng)
    arity = FUNCS[fname][0]
    children = [full_random_tree(rng, max_depth - 1) for _ in range(arity)]
    return FuncNode(fname, children)

def random_tree(rng: random.Random, max_depth: int) -> Node:
    """Ramped init: pick the grow or full method with equal probability."""
    if rng.random() < 0.5:
        return grow_random_tree(rng, max_depth)
    return full_random_tree(rng, max_depth)

def iter_nodes(root: Node):
    """Yield (parent, index_in_parent, node) for every node in pre-order.

    The root is reported with parent=None, index=None; each recursive call
    reports its own subtree root the same way, so the tuple is rewritten here
    with the true parent and child index before being re-yielded.
    """
    yield None, None, root
    if isinstance(root, FuncNode):
        for i, child in enumerate(root.children):
            for parent, idx, node in iter_nodes(child):
                if parent is None:
                    # `node` is `child` itself: attach the real parent info.
                    yield root, i, node
                else:
                    yield parent, idx, node

def subtree_crossover(rng: random.Random, a: Node, b: Node, max_depth: int) -> Tuple[Node, Node]:
    """
    Swap randomly chosen subtrees between copies of `a` and `b`.

    A graft is reverted (on that side only) when it would push the offspring
    past `max_depth`, so both returned trees respect the depth bound.
    The parents are never modified.

    Note: removed the dead inner helper `depth_of_with_replacement`; it was
    never called and its revert logic referenced the wrong nodes.
    """
    a2, b2 = a.copy(), b.copy()
    # Candidate crossover points: every node (with its parent link) in each copy.
    a_nodes = list(iter_nodes(a2))
    b_nodes = list(iter_nodes(b2))
    pA, iA, nA = rng.choice(a_nodes)
    pB, iB, nB = rng.choice(b_nodes)

    # Graft b's chosen subtree into a's copy (root replacement when pA is None).
    if pA is None:
        candA = nB.copy()
        if candA.depth() <= max_depth:
            a2 = candA
    else:
        old = pA.children[iA]
        pA.children[iA] = nB.copy()
        if a2.depth() > max_depth:
            pA.children[iA] = old  # revert: offspring would be too deep

    # Graft a's chosen subtree into b's copy, symmetrically.
    if pB is None:
        candB = nA.copy()
        if candB.depth() <= max_depth:
            b2 = candB
    else:
        old = pB.children[iB]
        pB.children[iB] = nA.copy()
        if b2.depth() > max_depth:
            pB.children[iB] = old  # revert: offspring would be too deep

    return a2, b2

def subtree_mutation(rng: random.Random, root: Node, max_depth: int, p_point: float = 0.2) -> Node:
    """Mutate a copy of `root`: nudge a constant (point mutation) or replace a subtree."""
    out = root.copy()
    nodes = list(iter_nodes(out))
    parent, idx, node = rng.choice(nodes)
    # Point mutation: additive noise on an ephemeral constant leaf.
    if isinstance(node, TermNode) and node.const is not None and rng.random() < p_point:
        node.const += rng.uniform(-1.0, 1.0)
        return out
    # Otherwise grow a fresh subtree and splice it in, respecting max_depth.
    depth_budget = max(2, rng.randint(2, max_depth))
    new_sub = random_tree(rng, max_depth=depth_budget)
    if parent is None:
        return new_sub if new_sub.depth() <= max_depth else out
    old = parent.children[idx]
    parent.children[idx] = new_sub
    if out.depth() > max_depth:
        parent.children[idx] = old  # revert on depth violation
    return out

# ------------------------------
# Classic rule seeds as trees
# ------------------------------

def term(name: str) -> Node:
    """Shorthand: build a feature terminal."""
    return TermNode(key=name)

def const(c: float) -> Node:
    """Shorthand: build a constant terminal."""
    return TermNode(const=c)

def f(name: str, *chs: Node) -> Node:
    """Shorthand: build a function node over the given children."""
    return FuncNode(name, list(chs))

def seed_rules() -> List[Node]:
    """Classic dispatching rules encoded as priority trees (lower score wins)."""
    rules: List[Node] = []
    rules.append(term("pt"))                    # SPT: shortest processing time
    rules.append(f("neg", term("pt")))          # LPT: longest processing time
    rules.append(term("jr"))                    # FIFO: earliest job-ready time
    rules.append(term("due"))                   # EDD: earliest due date
    # CR: critical ratio (due - now) / rem_work, with a small additive guard
    rules.append(f("div",
                   f("sub", term("due"), term("now")),
                   f("add", term("rem_work"), const(1e-3))))
    rules.append(f("neg", term("rem_work")))    # MWKR: most work remaining
    rules.append(term("rem_work"))              # LWKR: least work remaining
    rules.append(f("neg", term("rem_ops")))     # MOPNR: most operations remaining
    return rules

# ------------------------------
# Scheduling Simulator (Greedy by rule)
# ------------------------------

def evaluate_rule_on_problem(rule: "Node", pb: "Problem", rng: random.Random) -> float:
    """
    Greedily build a complete schedule using `rule` as a priority function
    and return the resulting makespan (lower is better).

    At each decision step all feasible (job, op, machine) candidates are
    enumerated, the rule scores each one from its feature context, and the
    candidate with the smallest score is scheduled at its earliest start.
    Ties break on smaller end time, then smaller start time.

    Args:
        rule: expression tree exposing eval(ctx) -> float.
        pb:   the FJSP instance to schedule.
        rng:  kept for interface compatibility (currently unused).

    Returns:
        The schedule makespan as a float.
    """
    n_jobs, m = pb.n_jobs, pb.m
    next_op = np.zeros(n_jobs, dtype=np.int32)      # next operation index per job
    job_ready = np.zeros(n_jobs, dtype=np.float32)  # earliest time each job may continue
    machine_ready = np.zeros(m, dtype=np.float32)   # earliest free time per machine

    # Remaining work = sum of minimal processing times of a job's remaining ops.
    def remaining_work(j, from_k):
        return float(sum(min(op.machine_time.values()) for op in pb.jobs[j].ops[from_k:]))

    rem_work = np.array([remaining_work(j, 0) for j in range(n_jobs)], dtype=np.float32)
    makespan = 0.0
    done_ops = 0
    total_ops = pb.n_ops_total

    while done_ops < total_ops:
        # Enumerate all feasible (job, op, machine) candidates.
        cands = []
        for j in range(n_jobs):
            k = int(next_op[j])
            if k >= len(pb.jobs[j].ops):
                continue  # job already finished
            op = pb.jobs[j].ops[k]
            for mm, pt in op.machine_time.items():
                start = max(float(machine_ready[mm]), float(job_ready[j]))
                end = start + float(pt)
                cands.append((j, k, mm, float(pt), start, end))
        if not cands:
            # No candidate -> deadlock (shouldn't happen); advance time and retry.
            tmin = min(list(machine_ready) + list(job_ready))
            machine_ready[:] = np.maximum(machine_ready, tmin)
            job_ready[:] = np.maximum(job_ready, tmin)
            continue

        # "now" = earliest candidate start in this decision step.
        now = min(c[4] for c in cands)

        # Choose the candidate with the minimal score.
        best = None
        best_score = float("inf")
        for (j, k, mm, pt, start, end) in cands:
            ctx = {
                "pt": pt,
                "jr": float(job_ready[j]),
                "mr": float(machine_ready[mm]),
                "now": now,
                "rem_work": float(rem_work[j]),
                "rem_ops": float(len(pb.jobs[j].ops) - k),
                "due": float(pb.jobs[j].due),
                "slack": float(pb.jobs[j].due - start),
            }
            score = rule.eval(ctx)
            # BUGFIX: a NaN (or leading +inf) score fails every comparison
            # below, which previously could leave `best` as None and crash
            # on the unpack. Penalize non-finite scores and always accept
            # the first candidate.
            if not math.isfinite(score):
                score = float("inf")
            # tie-break: smaller end time, then smaller start time
            if best is None \
               or (score < best_score) \
               or (abs(score - best_score) < 1e-9 and end < best[5]) \
               or (abs(score - best_score) < 1e-9 and abs(end - best[5]) < 1e-9 and start < best[4]):
                best = (j, k, mm, pt, start, end)
                best_score = score

        # Commit the chosen candidate.
        j, k, mm, pt, start, end = best
        machine_ready[mm] = end
        job_ready[j] = end
        # Remaining work shrinks by the op's minimal time (the bound used above).
        rem_work[j] = max(0.0, rem_work[j] - min(pb.jobs[j].ops[k].machine_time.values()))
        next_op[j] += 1
        done_ops += 1
        makespan = max(makespan, end)

    return float(makespan)

# ------------------------------
# GP (GA) Framework
# ------------------------------

@dataclass
class Individual:
    """A GP individual: the expression tree plus its cached fitness."""
    tree: Node
    fitness: float  # mean makespan over the training instances; lower is better

def tournament_select(rng: random.Random, pop: List[Individual], k: int = 3) -> Individual:
    """Tournament selection: sample k individuals, return the fittest (lowest fitness)."""
    contenders = rng.sample(pop, k)
    winner = min(contenders, key=lambda ind: ind.fitness)
    return winner

def gp_run(
    rng_seed: int = 42,
    pop_size: int = 60,
    gens: int = 30,
    max_depth: int = 6,
    tourn_k: int = 3,
    cx_rate: float = 0.9,
    mut_rate: float = 0.2,
    elitism: int = 2,
    train_instances: int = 1,  # number of random FJSP instances to average fitness on
    n_jobs: int = 12, m: int = 5, ops_per_job: int = 4,
) -> Tuple[Individual, List[float]]:
    """
    Evolve a composite dispatching rule with genetic programming.

    The population is seeded with classic rules (SPT/LPT/FIFO/EDD/CR/MWKR/
    LWKR/MOPNR) plus random trees; evolution uses elitism, tournament
    selection, subtree crossover and subtree mutation. Fitness is the mean
    makespan over the training instances (lower is better).

    Returns:
        (best, best_hist): the best Individual found overall and the
        best-so-far fitness recorded after each generation.
    """
    rng = random.Random(rng_seed)

    # build training set (multiple instances help avoid overfitting to one layout)
    problems = [build_demo_problem(n_jobs=n_jobs, m=m, ops_per_job=ops_per_job, seed=rng.randint(0, 10_000))
                for _ in range(train_instances)]

    # init population: classic-rule seeds + random trees, each evaluated immediately
    pop: List[Individual] = []
    for t in seed_rules():
        fit = np.mean([evaluate_rule_on_problem(t, pb, rng) for pb in problems])
        pop.append(Individual(tree=t, fitness=fit))
    while len(pop) < pop_size:
        t = random_tree(rng, max_depth=max_depth)
        fit = np.mean([evaluate_rule_on_problem(t, pb, rng) for pb in problems])
        pop.append(Individual(tree=t, fitness=fit))

    # record convergence (best-so-far fitness per generation)
    best_hist = []
    best = min(pop, key=lambda ind: ind.fitness)

    print(f"Init best makespan: {best.fitness:.3f}  rule={best.tree}")

    for g in range(1, gens+1):
        new_pop: List[Individual] = []

        # Elitism: copy the top `elitism` individuals over unchanged
        elites = sorted(pop, key=lambda ind: ind.fitness)[:elitism]
        new_pop.extend([Individual(e.tree.copy(), e.fitness) for e in elites])

        # Reproduction: crossover with prob cx_rate, otherwise cloning; then mutation
        while len(new_pop) < pop_size:
            p1 = tournament_select(rng, pop, tourn_k)
            if rng.random() < cx_rate:
                p2 = tournament_select(rng, pop, tourn_k)
                c1_tree, c2_tree = subtree_crossover(rng, p1.tree, p2.tree, max_depth=max_depth)
            else:
                c1_tree, c2_tree = p1.tree.copy(), tournament_select(rng, pop, tourn_k).tree.copy()

            if rng.random() < mut_rate:
                c1_tree = subtree_mutation(rng, c1_tree, max_depth=max_depth)
            if rng.random() < mut_rate:
                c2_tree = subtree_mutation(rng, c2_tree, max_depth=max_depth)

            # evaluate children (second child only if there is room left)
            c1_fit = np.mean([evaluate_rule_on_problem(c1_tree, pb, rng) for pb in problems])
            new_pop.append(Individual(c1_tree, c1_fit))
            if len(new_pop) < pop_size:
                c2_fit = np.mean([evaluate_rule_on_problem(c2_tree, pb, rng) for pb in problems])
                new_pop.append(Individual(c2_tree, c2_fit))

        pop = new_pop
        cur_best = min(pop, key=lambda ind: ind.fitness)
        if cur_best.fitness < best.fitness:
            best = Individual(cur_best.tree.copy(), cur_best.fitness)

        best_hist.append(best.fitness)
        print(f"Gen {g:3d}: best makespan = {best.fitness:.3f}  rule={best.tree}")

    return best, best_hist

# ------------------------------
# Plot convergence & Demo
# ------------------------------

def plot_convergence(best_hist: List[float], title: str = "GP convergence"):
    """Plot the best-so-far makespan per generation as a convergence curve."""
    generations = np.arange(1, len(best_hist) + 1)
    plt.figure(figsize=(7, 4))
    plt.plot(generations, best_hist, marker="o")
    plt.xlabel("Generation")
    plt.ylabel("Best makespan")
    plt.title(title)
    plt.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.show()

def main():
    """Run the GP demo: evolve a rule, print it, and plot the convergence curve."""
    # Hyper-parameters can be tuned to the instance size / time budget:
    # - pop_size: 40~120
    # - gens: 20~80
    # - max_depth: 4~8
    # - train_instances: 1~3 (averaging over several instances aids generalization
    #   but takes proportionally longer)
    best, hist = gp_run(
        rng_seed=42,
        pop_size=60,
        gens=100,
        max_depth=5,
        tourn_k=3,
        cx_rate=0.9,
        mut_rate=0.25,
        elitism=2,
        train_instances=1,
        n_jobs=12, m=5, ops_per_job=4,
    )

    print("\n=== Best Composite Rule ===")
    print(best.tree)
    print(f"Best makespan: {best.fitness:.3f}")
    plot_convergence(hist, title="GP for Composite Dispatching Rules (Best Makespan per Gen)")

if __name__ == "__main__":  # script entry point
    main()
