#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：V2 
@File    ：QLearning1.py
@IDE     ：PyCharm 
@Author  ：郭星
@Date    ：2025/9/10 21:34 
'''
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
FJSP with Tabular Q-Learning where actions are single dispatching rules.

- Actions: choose one rule from {SPT, LPT, FIFO, EDD, CR, MWKR, LWKR, MOPNR}
- State: tuple of each job's next-operation index (discrete), e.g., (0,1,0,2,...)
- Transition: apply the chosen rule to pick the best (job,op,machine) candidate; schedule at earliest start
- Reward: negative increment of makespan minus a small step penalty -> r = -(Δmakespan) - 0.01
- Goal: minimize makespan
- Output: print Q-table at the end; evaluate greedy policy (argmax Q) to show final schedule & makespan

Note:
- This is a pedagogical tabular Q-learning demo. State is compact (only next_op vector),
  so the same state can correspond to different time configurations. It still works reasonably
  for small/medium demos. For large instances you may need function approximation.
"""

import random
from dataclasses import dataclass
from typing import Dict, List, Tuple
import numpy as np

# ======================
# Problem Definition
# ======================

@dataclass
class Operation:
    """One operation of a job: the machines that can process it and how long each takes."""
    machine_time: Dict[int, int]  # machine id -> processing time on that machine

@dataclass
class Job:
    """A job: an ordered sequence of operations plus a due date."""
    ops: List[Operation]  # operations, processed strictly in list order
    due: float  # due date; consumed by the EDD and CR dispatching rules

@dataclass
class Problem:
    """An FJSP instance: the jobs to be scheduled and the machine count."""
    jobs: List[Job]  # all jobs of the instance
    m: int  # number of machines

    @property
    def n_jobs(self) -> int:
        """Number of jobs in the instance."""
        return len(self.jobs)

    @property
    def n_ops_total(self) -> int:
        """Total operation count summed over every job."""
        total = 0
        for job in self.jobs:
            total += len(job.ops)
        return total

    def max_ops_per_job(self) -> int:
        """Length of the longest job's operation list (0 when there are no jobs)."""
        longest = 0
        for job in self.jobs:
            longest = max(longest, len(job.ops))
        return longest


def build_demo_problem(n_jobs: int = 6, m: int = 4, ops_per_job: int = 3, seed: int = 7) -> Problem:
    """
    Construct a small random FJSP demo instance.

    Each operation is given 2 random alternative machines (1 when only one
    machine exists) with processing times drawn uniformly from [1, 9].
    A job's due date is the sum of its minimal per-operation times plus a
    random slack in [5, 20].  The same seed always yields the same instance.
    """
    rng = random.Random(seed)
    jobs: List[Job] = []
    # Number of alternative machines per operation (invariant over the instance).
    n_alt = 2 if m >= 2 else 1
    for _ in range(n_jobs):
        ops: List[Operation] = []
        min_total = 0  # sum of the fastest possible time for each op
        for _ in range(ops_per_job):
            chosen = sorted(rng.sample(range(m), k=n_alt))
            times = {mach: rng.randint(1, 9) for mach in chosen}
            ops.append(Operation(times))
            min_total += min(times.values())
        slack = rng.randint(5, 20)
        jobs.append(Job(ops=ops, due=float(min_total + slack)))
    return Problem(jobs=jobs, m=m)

# ======================
# Rules (actions)
# ======================

RULES = ["SPT", "LPT", "FIFO", "EDD", "CR", "MWKR", "LWKR", "MOPNR"]

def rule_score(rule: str, *, pt: float, jr: float, mr: float, now: float,
               rem_work: float, rem_ops: int, due: float, start: float, end: float) -> float:
    """
    Score a candidate (job, op, machine) under the given dispatching rule.

    Smaller is better; ties are broken by the caller on (end, start).
    Rules that prefer the *largest* feature (LPT, MWKR, MOPNR) return the
    negated feature so minimization still applies.  Unknown rule names fall
    back to SPT (the raw processing time).
    """
    if rule == "CR":
        # Critical Ratio = (due - now) / remaining work; guard a zero denominator.
        return (due - now) / max(rem_work, 1e-6)
    score_by_rule = {
        "SPT": pt,          # Shortest Processing Time
        "LPT": -pt,         # Longest Processing Time (inverted)
        "FIFO": jr,         # earliest job ready time first
        "EDD": due,         # Earliest Due Date
        "MWKR": -rem_work,  # Most Work Remaining (inverted)
        "LWKR": rem_work,   # Least Work Remaining
        "MOPNR": -rem_ops,  # Most Operations Remaining (inverted)
    }
    return score_by_rule.get(rule, pt)

# ======================
# Environment (rule -> concrete scheduling)
# ======================

class FJSPRuleEnv:
    """
    FJSP scheduling environment whose actions are dispatching rules.

    State: tuple(next_op[j] for j in jobs)  (discrete)
    Action: pick a rule; environment chooses the best candidate by that rule
    Step: schedule (j,k) on machine mm at earliest start, update times
    Done: when all operations are scheduled
    Reward: r = -(makespan - old_makespan) - 0.01

    NOTE: the state key omits all timing information, so distinct time
    configurations can share one state; this keeps the tabular Q-table small
    at the cost of state aliasing.
    """
    def __init__(self, pb: Problem):
        self.pb = pb
        self.n_jobs, self.m = pb.n_jobs, pb.m
        self.max_ops = pb.max_ops_per_job()
        self.reset()  # initialize all per-episode bookkeeping

    def reset(self):
        """Clear all scheduling state and return the initial state key."""
        self.next_op = np.zeros(self.n_jobs, dtype=np.int32)      # index of each job's next unscheduled op
        self.job_ready = np.zeros(self.n_jobs, dtype=np.float32)  # time each job's previous op finishes
        self.machine_ready = np.zeros(self.m, dtype=np.float32)   # time each machine becomes free
        self.makespan = 0.0
        self.done_ops = 0
        # remaining work estimate per job = sum of minimal times of remaining operations
        self.rem_work = np.array([
            sum(min(op.machine_time.values()) for op in job.ops)
            for job in self.pb.jobs
        ], dtype=np.float32)
        # assignment for visualization/output: (job, op) -> (machine, start, end)
        self.assign: Dict[Tuple[int,int], Tuple[int,float,float]] = {}
        return self._state_key()

    def _state_key(self):
        # Discrete state for tabular Q: tuple of next op indices
        return tuple(int(x) for x in self.next_op.tolist())

    def _candidates(self):
        """Enumerate all feasible (j,k,mm) at earliest start"""
        cands = []  # (j, k, mm, pt, start, end)
        for j in range(self.n_jobs):
            k = int(self.next_op[j])
            if k >= len(self.pb.jobs[j].ops):
                continue  # job j is fully scheduled
            op = self.pb.jobs[j].ops[k]
            for mm, pt in op.machine_time.items():
                # Earliest feasible start: both the machine and the job must be free.
                start = max(float(self.machine_ready[mm]), float(self.job_ready[j]))
                end = start + float(pt)
                cands.append((j, k, mm, float(pt), start, end))
        return cands

    def step(self, action_rule_id: int):
        """Apply rule RULES[action_rule_id] once; return (state_key, reward, done, info)."""
        rule = RULES[action_rule_id]
        cands = self._candidates()
        if not cands:
            # should not happen unless terminal; give small penalty and end
            return self._state_key(), -0.5, True, {}

        # "now" = the earliest start among all candidates (reference time for CR).
        now = min(c[4] for c in cands)

        # pick candidate with minimal rule score (tie-break on end then start)
        best, best_score = None, float("inf")
        for (j, k, mm, pt, start, end) in cands:
            rem_ops = len(self.pb.jobs[j].ops) - k  # ops still unscheduled for job j (incl. this one)
            s = rule_score(
                rule, pt=pt, jr=float(self.job_ready[j]), mr=float(self.machine_ready[mm]),
                now=now, rem_work=float(self.rem_work[j]), rem_ops=rem_ops,
                due=float(self.pb.jobs[j].due), start=start, end=end
            )
            # Accept when strictly better; or tied within 1e-9 with an earlier end;
            # or tied on both score and end with an earlier start.
            if (s < best_score) or (abs(s - best_score) < 1e-9 and end < (best[5] if best else 1e18)) or \
               (abs(s - best_score) < 1e-9 and best and abs(end - best[5]) < 1e-9 and start < best[4]):
                best, best_score = (j, k, mm, pt, start, end), s

        j, k, mm, pt, start, end = best
        old_mk = self.makespan

        # schedule: commit the chosen operation at its earliest start
        self.machine_ready[mm] = end
        self.job_ready[j] = end
        self.assign[(j, k)] = (mm, start, end)
        self.makespan = max(self.makespan, end)
        self.next_op[j] += 1
        # update remaining work estimate (subtract minimal time of this op)
        min_time_this_op = min(self.pb.jobs[j].ops[k].machine_time.values())
        self.rem_work[j] = max(0.0, self.rem_work[j] - float(min_time_this_op))
        self.done_ops += 1

        # Dense reward: penalize any makespan growth plus a small per-step cost.
        reward = -(self.makespan - old_mk) - 0.01
        done = self.done_ops == self.pb.n_ops_total
        return self._state_key(), reward, done, {}

# ======================
# Q-Learning
# ======================

class QTable:
    """
    Tabular Q-function: maps a discrete state tuple to an array of action values.

    States are created lazily with all-zero value rows on first access.
    """
    def __init__(self, n_actions: int):
        self.n_actions = n_actions
        self.table: Dict[Tuple[int,...], np.ndarray] = {}

    def get(self, state: Tuple[int, ...]) -> np.ndarray:
        """Return the Q-value row for `state`, creating a zero row on first visit."""
        row = self.table.get(state)
        if row is None:
            row = np.zeros(self.n_actions, dtype=np.float32)
            self.table[state] = row
        return row

    def update(self, s: Tuple[int,...], a: int, target: float, alpha: float):
        """Blend Q(s,a) toward `target` with learning rate `alpha`."""
        row = self.get(s)
        row[a] = (1 - alpha) * row[a] + alpha * target

    def greedy_action(self, s: Tuple[int,...]) -> int:
        """Action with the highest Q-value in state `s` (lowest index on ties)."""
        return int(np.argmax(self.get(s)))

    def epsilon_greedy(self, s: Tuple[int,...], eps: float) -> int:
        """With probability `eps` pick a uniform random action, else the greedy one."""
        explore = np.random.rand() < eps
        if explore:
            return int(np.random.randint(self.n_actions))
        return self.greedy_action(s)

    def items(self):
        """Iterate over (state, Q-value row) pairs."""
        return self.table.items()

def train_q_learning(env: FJSPRuleEnv,
                     episodes: int = 800,
                     alpha: float = 0.2,
                     gamma: float = 0.99,
                     eps_start: float = 1.0,
                     eps_end: float = 0.05,
                     eps_decay: float = 0.997):
    """
    Train a tabular Q-learning agent on `env`.

    Epsilon decays multiplicatively per episode, floored at `eps_end`.
    Returns (QTable, per-episode returns, per-episode makespans).
    """
    qtab = QTable(n_actions=len(RULES))
    epsilon = eps_start
    returns_hist, makespan_hist = [], []

    for episode in range(1, episodes + 1):
        state = env.reset()
        ep_return = 0.0
        finished = False
        while not finished:
            action = qtab.epsilon_greedy(state, epsilon)
            nxt, reward, finished, _ = env.step(action)
            ep_return += reward

            # One-step Q-learning target: r + gamma * max_a' Q(s', a'); zero at terminal.
            bootstrap = np.max(qtab.get(nxt)) if not finished else 0.0
            qtab.update(state, action, reward + gamma * bootstrap, alpha)

            state = nxt

        returns_hist.append(ep_return)
        makespan_hist.append(env.makespan)
        epsilon = max(eps_end, epsilon * eps_decay)
        if episode % 50 == 0 or episode == 1:
            print(f"[EP {episode:4d}] return={ep_return:8.3f}  makespan={env.makespan:7.2f}  eps={epsilon:5.3f}")

    return qtab, returns_hist, makespan_hist

# ======================
# Evaluation helpers
# ======================

def evaluate_greedy_policy(env: FJSPRuleEnv, q: QTable):
    """Roll out the greedy (argmax-Q) policy for one episode; return (assignment, makespan)."""
    state = env.reset()
    done = False
    while not done:
        state, _, done, _ = env.step(q.greedy_action(state))
    return env.assign, env.makespan

def print_q_table(q: QTable, max_states: int = None):
    """
    Print every learned state together with its Q-value row.

    If `max_states` is given, only that many states are listed.
    """
    entries = list(q.items())
    print("\n=== Q-Table (state -> Q values per action) ===")
    print("Actions order:", RULES)
    if max_states is None:
        print(f"(total states: {len(q.table)})")
    else:
        entries = entries[:max_states]
        print(f"(showing first {max_states} states out of {len(q.table)})")
    for state, values in entries:
        formatted = ", ".join(f"{v:7.3f}" for v in values.tolist())
        print(f"State {state} -> [{formatted}]")

def print_schedule(assign: Dict[Tuple[int,int], Tuple[int,float,float]], makespan: float):
    """Print the (job, op) -> (machine, start, end) assignment in job/op order."""
    print(f"\nGreedy policy schedule makespan: {makespan:.2f}")
    for j, k in sorted(assign):
        mm, op_start, op_end = assign[(j, k)]
        print(f"Job {j:2d} Op {k:2d} -> M{mm}  start={op_start:6.1f}  end={op_end:6.1f}")

# ======================
# Main
# ======================

def main():
    """Build a demo instance, train the rule-selecting agent, and report results."""
    # Scaling up (e.g. n_jobs=12, m=5, ops_per_job=4 or 20x4x5) makes the
    # Q-table grow quickly; large instances call for function approximation.
    problem = build_demo_problem(n_jobs=6, m=4, ops_per_job=3, seed=7)
    env = FJSPRuleEnv(problem)

    # Tabular Q-learning over dispatching-rule actions.
    qtab, _returns, _makespans = train_q_learning(
        env,
        episodes=800,    # increase for better learning
        alpha=0.2,
        gamma=0.99,
        eps_start=1.0,
        eps_end=0.05,
        eps_decay=0.997,
    )

    # Dump the learned Q-table (set max_states to truncate the listing).
    print_q_table(qtab, max_states=None)

    # Roll out the greedy (argmax-Q) policy and show its schedule.
    assignment, makespan = evaluate_greedy_policy(env, qtab)
    print_schedule(assignment, makespan)

if __name__ == "__main__":
    # Reproducibility
    np.random.seed(0)
    random.seed(0)
    main()
