#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：V2 
@File    ：QLearning2.py
@IDE     ：PyCharm 
@Author  ：郭星
@Date    ：2025/9/10 21:39 
'''
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
FJSP with Tabular Q-Learning (20 jobs × 4 ops/job × 5 machines)
- 动作：单一派工规则（SPT/LPT/FIFO/EDD/CR/MWKR/LWKR/MOPNR）
- 状态：离散的 next_op 向量（每个作业下一道工序索引的元组）
- 奖励：r = -(Δmakespan) - 0.01
- 输出：Q 表（打印+CSV），学习曲线（回报/完工期），最终贪心策略的调度结果

注意：20×4×5 的表格型 Q 学习**状态空间非常大**（最多 5^20 种组合，虽然可达状态远少于此），
代码可运行但训练时间会随 episode 增加而增长且内存占用随访问状态增多而增加。
如需更高效率/更强效果，建议用函数逼近（DQN/PPO/GNN 等）。
"""

import csv
import random
from dataclasses import dataclass
from typing import Dict, List, Tuple
import numpy as np
import matplotlib.pyplot as plt

# ======================
# 问题定义
# ======================

@dataclass
class Operation:
    machine_time: Dict[int, int]  # eligible machine id -> processing time on that machine

@dataclass
class Job:
    # Ordered operations; operation k+1 of a job may start only after operation k finishes.
    ops: List[Operation]
    due: float  # due date (consumed by the EDD and CR dispatching rules)

@dataclass
class Problem:
    """An FJSP instance: the list of jobs plus the machine count."""
    jobs: List[Job]
    m: int  # number of machines

    @property
    def n_jobs(self):
        """Number of jobs in the instance."""
        return len(self.jobs)

    @property
    def n_ops_total(self):
        """Total operation count across every job."""
        total = 0
        for job in self.jobs:
            total += len(job.ops)
        return total

    def max_ops_per_job(self):
        """Length of the longest job's operation list (0 for an empty instance)."""
        lengths = [len(job.ops) for job in self.jobs]
        return max(lengths) if lengths else 0


def build_demo_problem(n_jobs: int = 20, m: int = 5, ops_per_job: int = 4, seed: int = 7) -> Problem:
    """Generate a random FJSP instance.

    Each operation is feasible on 1-2 machines with processing times in 1..9.
    A job's due date is its minimal total processing time plus random slack.

    Args:
        n_jobs: number of jobs to generate.
        m: number of machines.
        ops_per_job: operations per job.
        seed: seed for the dedicated RNG (instance is reproducible).
    """
    rng = random.Random(seed)
    job_list: List[Job] = []
    for _ in range(n_jobs):
        op_list: List[Operation] = []
        shortest_total = 0  # sum of each op's fastest feasible time
        for _ in range(ops_per_job):
            n_machines = 2 if m >= 2 else 1
            eligible = sorted(rng.sample(range(m), k=n_machines))
            times = {mach: rng.randint(1, 9) for mach in eligible}
            op_list.append(Operation(times))
            shortest_total += min(times.values())
        slack = rng.randint(5, 20)
        job_list.append(Job(ops=op_list, due=float(shortest_total + slack)))
    return Problem(jobs=job_list, m=m)

# ======================
# 规则（作为动作）
# ======================

RULES = ["SPT", "LPT", "FIFO", "EDD", "CR", "MWKR", "LWKR", "MOPNR"]

def rule_score(rule: str, *, pt: float, jr: float, mr: float, now: float,
               rem_work: float, rem_ops: int, due: float, start: float, end: float) -> float:
    """Priority score of a candidate under the given dispatching rule (lower = better).

    Unknown rule names fall back to SPT's score (the processing time).
    """
    if rule == "CR":
        # Critical ratio: slack over remaining work; clamp the denominator to avoid /0.
        return (due - now) / max(rem_work, 1e-6)
    scores = {
        "SPT": pt,          # shortest processing time
        "LPT": -pt,         # longest processing time (negated -> minimization)
        "FIFO": jr,         # earliest job-ready time first
        "EDD": due,         # earliest due date
        "MWKR": -rem_work,  # most work remaining (negated)
        "LWKR": rem_work,   # least work remaining
        "MOPNR": -rem_ops,  # most operations remaining (negated)
    }
    return scores.get(rule, pt)

# ======================
# 环境（动作=规则 → 具体调度）
# ======================

class FJSPRuleEnv:
    """
    FJSP environment whose actions are dispatching rules.

    State : discrete tuple state = tuple(next_op[j] for j)
    Action: an index into RULES; the environment applies that rule to pick the
            best feasible candidate (j, k, m) and schedules it
    Reward: r = -(makespan - old_makespan) - 0.01
    Done  : once every operation has been scheduled
    """
    def __init__(self, pb: Problem):
        self.pb = pb
        self.n_jobs, self.m = pb.n_jobs, pb.m
        self.max_ops = pb.max_ops_per_job()
        self.reset()

    def reset(self):
        # Clear all schedule bookkeeping and return the initial state key.
        self.next_op = np.zeros(self.n_jobs, dtype=np.int32)
        self.job_ready = np.zeros(self.n_jobs, dtype=np.float32)
        self.machine_ready = np.zeros(self.m, dtype=np.float32)
        self.makespan = 0.0
        self.done_ops = 0
        # Remaining-work estimate per job: sum of each remaining op's shortest feasible time.
        self.rem_work = np.array([
            sum(min(op.machine_time.values()) for op in job.ops)
            for job in self.pb.jobs
        ], dtype=np.float32)
        self.assign: Dict[Tuple[int,int], Tuple[int,float,float]] = {}  # (j,k)->(m,s,e)
        return self._state_key()

    def _state_key(self):
        # Discrete state key for the tabular Q-learner.
        return tuple(int(x) for x in self.next_op.tolist())

    def _candidates(self):
        # Enumerate every feasible (job, op, machine) choice with its timing.
        cands = []  # (j, k, mm, pt, start, end)
        for j in range(self.n_jobs):
            k = int(self.next_op[j])
            if k >= len(self.pb.jobs[j].ops):  # this job is already finished
                continue
            op = self.pb.jobs[j].ops[k]
            for mm, pt in op.machine_time.items():
                # Earliest start = when both the machine and the job become free.
                start = max(float(self.machine_ready[mm]), float(self.job_ready[j]))
                end = start + float(pt)
                cands.append((j, k, mm, float(pt), start, end))
        return cands

    def step(self, action_rule_id: int):
        # Apply RULES[action_rule_id], schedule one operation, return (state, reward, done, info).
        rule = RULES[action_rule_id]
        cands = self._candidates()
        if not cands:
            # Should essentially never happen; treat as terminal if it does.
            return self._state_key(), -0.5, True, {}

        now = min(c[4] for c in cands)

        # Pick the candidate with the smallest rule score; ties broken by earlier end, then earlier start.
        best, best_score = None, float("inf")
        for (j, k, mm, pt, start, end) in cands:
            rem_ops = len(self.pb.jobs[j].ops) - k
            s = rule_score(
                rule, pt=pt, jr=float(self.job_ready[j]), mr=float(self.machine_ready[mm]),
                now=now, rem_work=float(self.rem_work[j]), rem_ops=rem_ops,
                due=float(self.pb.jobs[j].due), start=start, end=end
            )
            if (s < best_score) or (abs(s - best_score) < 1e-9 and end < (best[5] if best else 1e18)) or \
               (abs(s - best_score) < 1e-9 and best and abs(end - best[5]) < 1e-9 and start < best[4]):
                best, best_score = (j, k, mm, pt, start, end), s

        j, k, mm, pt, start, end = best
        old_mk = self.makespan

        # Commit the chosen operation to the schedule.
        self.machine_ready[mm] = end
        self.job_ready[j] = end
        self.assign[(j, k)] = (mm, start, end)
        self.makespan = max(self.makespan, end)
        self.next_op[j] += 1
        # Update the remaining-work estimate (subtract this op's shortest feasible time).
        min_time_this_op = min(self.pb.jobs[j].ops[k].machine_time.values())
        self.rem_work[j] = max(0.0, self.rem_work[j] - float(min_time_this_op))
        self.done_ops += 1

        reward = -(self.makespan - old_mk) - 0.01
        done = self.done_ops == self.pb.n_ops_total
        return self._state_key(), reward, done, {}

# ======================
# Q-Learning（表格）
# ======================

class QTable:
    """Tabular Q function: maps a discrete state tuple to a vector of action values."""
    def __init__(self, n_actions: int):
        self.n_actions = n_actions
        self.table: Dict[Tuple[int,...], np.ndarray] = {}

    def get(self, state: Tuple[int, ...]) -> np.ndarray:
        """Return the Q row for *state*, lazily creating a zero row on first visit."""
        row = self.table.get(state)
        if row is None:
            row = np.zeros(self.n_actions, dtype=np.float32)
            self.table[state] = row
        return row

    def update(self, s: Tuple[int,...], a: int, target: float, alpha: float):
        """Move Q(s, a) toward *target* with learning rate *alpha*."""
        row = self.get(s)
        row[a] = (1 - alpha) * row[a] + alpha * target

    def greedy_action(self, s: Tuple[int,...]) -> int:
        """Index of the highest-valued action in state *s*."""
        return int(np.argmax(self.get(s)))

    def epsilon_greedy(self, s: Tuple[int,...], eps: float) -> int:
        """Pick a uniformly random action with probability *eps*, else the greedy one."""
        explore = np.random.rand() < eps
        if explore:
            return int(np.random.randint(self.n_actions))
        return self.greedy_action(s)

    def items(self):
        """Iterate over (state, q-row) pairs."""
        return self.table.items()

def train_q_learning(env: FJSPRuleEnv,
                     episodes: int = 1500,
                     alpha: float = 0.2,
                     gamma: float = 0.99,
                     eps_start: float = 1.0,
                     eps_end: float = 0.05,
                     eps_decay: float = 0.997):
    """Train a tabular Q-learning agent on *env*.

    Args:
        env: environment exposing reset()/step() and a `makespan` attribute.
        episodes: number of training episodes.
        alpha: learning rate for the Q update.
        gamma: discount factor.
        eps_start/eps_end/eps_decay: epsilon-greedy exploration schedule
            (multiplicative decay per episode, floored at eps_end).

    Returns:
        (q, ep_returns, ep_makespans): the learned QTable plus per-episode
        total reward and final makespan histories.
    """
    q = QTable(n_actions=len(RULES))
    eps = eps_start
    ep_returns, ep_makespans = [], []

    for ep in range(1, episodes + 1):
        s = env.reset()
        done = False
        total_r = 0.0
        while not done:
            a = q.epsilon_greedy(s, eps)
            s2, r, done, _ = env.step(a)
            total_r += r

            # Q-learning target: r + gamma * max_a' Q(s', a'); terminal states bootstrap 0.
            best_next = np.max(q.get(s2)) if not done else 0.0
            target = r + gamma * best_next
            q.update(s, a, target, alpha)
            s = s2

        ep_returns.append(total_r)
        ep_makespans.append(env.makespan)
        eps = max(eps_end, eps * eps_decay)

        if ep % 50 == 0 or ep == 1:
            # BUGFIX: the original guarded on `'makespans' in locals()`, which could
            # never be true (the local is `ep_makespans`), and the guarded branch read
            # a nonexistent `env.makespans` attribute. Report env.makespan directly.
            print(f"[EP {ep:4d}] return={total_r:9.3f}  makespan={env.makespan:7.2f}  eps={eps:5.3f}")

    return q, ep_returns, ep_makespans

# ======================
# 可视化 & 导出
# ======================

def plot_learning_curves(ep_returns: List[float], ep_makespans: List[float]):
    """Plot per-episode return (left panel) and makespan (right panel) side by side."""
    episodes_axis = np.arange(1, len(ep_returns) + 1)
    panels = [
        (1, ep_returns, "Return", "Q-Learning: Episode Return"),
        (2, ep_makespans, "Makespan", "Q-Learning: Episode Makespan"),
    ]
    plt.figure(figsize=(10, 4))
    for position, series, y_label, title in panels:
        plt.subplot(1, 2, position)
        plt.plot(episodes_axis, series)
        plt.xlabel("Episode")
        plt.ylabel(y_label)
        plt.title(title)
        plt.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.show()

def save_q_table_csv(q: QTable, path: str = "q_table.csv"):
    """Write the Q table to CSV: a `state` column plus one Q-value column per rule."""
    with open(path, "w", newline="", encoding="utf-8") as f:
        writer = csv.writer(f)
        writer.writerow(["state"] + RULES)
        # Sort states by their string form so the file is easy to scan by eye.
        ordered = sorted(q.items(), key=lambda kv: str(kv[0]))
        for state, qvals in ordered:
            writer.writerow([str(state)] + [f"{v:.6f}" for v in qvals.tolist()])
    print(f"Q-table saved to: {path}")

def evaluate_greedy_policy(env: FJSPRuleEnv, q: QTable):
    """Roll out one full episode acting greedily w.r.t. *q*.

    Returns the environment's (assignments, makespan) after the rollout.
    """
    state = env.reset()
    finished = False
    while not finished:
        action = q.greedy_action(state)
        state, _, finished, _ = env.step(action)
    return env.assign, env.makespan

def print_q_table(q: QTable, max_states: int = 120):
    """Pretty-print the Q table, truncated to *max_states* states.

    The cap (default 120) prevents flooding the console for the 20x4x5 instance;
    pass None to print everything.
    """
    all_items = list(q.items())
    total = len(all_items)
    shown = all_items if max_states is None else all_items[:max_states]
    print("\n=== Q-Table (state -> Q values per action) ===")
    print("Actions order:", RULES)
    print(f"(showing {len(shown)} of {total} states)")
    for s, qvals in shown:
        qlist = ", ".join(f"{v:7.3f}" for v in qvals.tolist())
        print(f"State {s} -> [{qlist}]")

def print_schedule(assign: Dict[Tuple[int,int], Tuple[int,float,float]], makespan: float):
    """Print every (job, op) -> (machine, start, end) assignment in (job, op) order."""
    print(f"\nGreedy policy schedule makespan: {makespan:.2f}")
    for (j, k), (mm, s, e) in sorted(assign.items()):
        print(f"Job {j:2d} Op {k:2d} -> M{mm}  start={s:7.1f}  end={e:7.1f}")

# ======================
# Main
# ======================

def main():
    """Build the fixed 20x4x5 demo instance, train Q-learning, and report results."""
    problem = build_demo_problem(n_jobs=20, m=5, ops_per_job=4, seed=7)
    env = FJSPRuleEnv(problem)

    # Train; raise episodes to 2000-5000 for a steadier result at the cost of time.
    q, returns_hist, makespan_hist = train_q_learning(
        env,
        episodes=1500,
        alpha=0.2,
        gamma=0.99,
        eps_start=1.0,
        eps_end=0.05,
        eps_decay=0.997
    )

    # Persist the Q table and show a capped excerpt on the console.
    save_q_table_csv(q, "q_table.csv")
    print_q_table(q, max_states=120)

    # Learning curves (return and makespan per episode).
    plot_learning_curves(returns_hist, makespan_hist)

    # Evaluate the learned greedy policy and print its schedule.
    schedule, final_makespan = evaluate_greedy_policy(env, q)
    print_schedule(schedule, final_makespan)

if __name__ == "__main__":
    # 可复现实验
    np.random.seed(0)
    random.seed(0)
    main()
