#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project : V2
@File    : T1.py
@IDE     : PyCharm
@Author  : Guo Xing -- deep reinforcement learning is a good approach for producing Gantt charts
@Date    : 2025/9/8 20:28
'''
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
FJSP with Deep Q-Network (DQN) — 20 jobs × 4 ops/job × 5 machines
Usage:
  pip install torch numpy matplotlib
  python fjsp_dqn_20x4x5.py
"""

import random
from dataclasses import dataclass
from typing import Dict, List, Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from collections import deque
import matplotlib.pyplot as plt


# ------------------------------
# Problem Definition
# ------------------------------

@dataclass
class Operation:
    """One operation of a job: the machines that can run it and its time on each."""
    machine_time: Dict[int, int]  # map: machine_id -> processing_time

@dataclass
class Job:
    """A job: an ordered sequence of operations (earlier ops precede later ones)."""
    ops: List[Operation]

@dataclass
class Problem:
    """An FJSP instance: the list of jobs plus the number of machines."""
    jobs: List[Job]
    m: int  # number of machines

    @property
    def n_jobs(self):
        """Number of jobs in the instance."""
        return len(self.jobs)

    @property
    def n_ops_total(self):
        """Total operation count across all jobs."""
        total = 0
        for job in self.jobs:
            total += len(job.ops)
        return total

    def max_ops_per_job(self):
        """Largest number of operations any single job has."""
        return max(len(job.ops) for job in self.jobs)


def build_demo_problem(n_jobs: int, m: int, ops_per_job: int, seed: int = 7) -> Problem:
    """
    Create a random demo FJSP instance.

    Every operation is runnable on min(2, m) randomly chosen machines, with
    processing times drawn uniformly from 1..9. The same seed always
    produces the same instance.
    """
    rng = random.Random(seed)
    jobs: List[Job] = []
    for _ in range(n_jobs):
        operations = []
        for _ in range(ops_per_job):
            # Pick the eligible machines first, then a time for each,
            # keeping the RNG call order deterministic per seed.
            eligible = sorted(rng.sample(range(m), k=min(2, m)))
            times = {}
            for machine in eligible:
                times[machine] = rng.randint(1, 9)
            operations.append(Operation(times))
        jobs.append(Job(operations))
    return Problem(jobs=jobs, m=m)


# ------------------------------
# Environment
# ------------------------------

class FJSPEnv:
    """
    Flexible job-shop scheduling environment.

    Each action schedules exactly one (job, operation, machine) triple.
    Invalid actions get a -1 penalty and leave the schedule untouched;
    valid actions start at the earliest time allowed by job precedence and
    machine availability. The episode ends when every operation is placed.
    """

    def __init__(self, problem: Problem):
        self.pb = problem
        self.n_jobs = self.pb.n_jobs
        self.m = self.pb.m
        self.max_ops = self.pb.max_ops_per_job()
        # Flattened (job, op, machine) action space.
        self.action_dim = self.n_jobs * self.max_ops * self.m
        self.reset()

    def reset(self):
        """Clear the schedule and return the initial state vector."""
        self.next_op = np.zeros(self.n_jobs, dtype=np.int32)      # next op index per job
        self.job_ready = np.zeros(self.n_jobs, dtype=np.float32)  # ready time per job
        self.machine_ready = np.zeros(self.m, dtype=np.float32)   # ready time per machine
        self.makespan = 0.0
        self.done_ops = 0
        # (job, op) -> (machine, start, end)
        self.assign: Dict[Tuple[int, int], Tuple[int, float, float]] = {}
        return self._state()

    def _state(self) -> np.ndarray:
        """State = normalized machine-ready times, job-ready times, and
        per-job progress (next op index divided by max ops per job)."""
        mr_peak = float(np.max(self.machine_ready)) if self.machine_ready.size > 0 else 1.0
        jr_peak = float(np.max(self.job_ready)) if self.job_ready.size > 0 else 1.0
        pieces = [
            self.machine_ready / (1.0 + max(mr_peak, 1.0)),
            self.job_ready / (1.0 + max(jr_peak, 1.0)),
            self.next_op.astype(np.float32) / max(float(self.max_ops), 1.0),
        ]
        return np.concatenate(pieces, axis=0).astype(np.float32)

    def _decode_action(self, a: int) -> Tuple[int, int, int]:
        """Unflatten an action id into (job, op, machine)."""
        j, rest = divmod(a, self.max_ops * self.m)
        k, mm = divmod(rest, self.m)
        return int(j), int(k), int(mm)

    def _encode_action(self, j: int, k: int, mm: int) -> int:
        """Flatten (job, op, machine) into a single action id."""
        return (j * self.max_ops + k) * self.m + mm

    def valid_action(self, j: int, k: int, mm: int) -> bool:
        """True iff (j, k) is job j's next pending operation and mm can run it."""
        return (
            0 <= j < self.n_jobs
            and k == int(self.next_op[j])
            and k < len(self.pb.jobs[j].ops)
            and mm in self.pb.jobs[j].ops[k].machine_time
        )

    def available_actions_mask(self) -> np.ndarray:
        """Binary mask over the flat action space marking schedulable triples."""
        mask = np.zeros(self.action_dim, dtype=np.float32)
        for j in range(self.n_jobs):
            k = int(self.next_op[j])
            if k >= len(self.pb.jobs[j].ops):
                continue  # job already finished
            for mm in self.pb.jobs[j].ops[k].machine_time:
                mask[self._encode_action(j, k, mm)] = 1.0
        return mask

    def step(self, action: int):
        """Apply one action; return (state, reward, done, info)."""
        j, k, mm = self._decode_action(action)
        if not self.valid_action(j, k, mm):
            # Penalize without modifying the schedule.
            return self._state(), -1.0, self.done_ops == self.pb.n_ops_total, {}

        duration = float(self.pb.jobs[j].ops[k].machine_time[mm])
        begin = max(float(self.machine_ready[mm]), float(self.job_ready[j]))
        finish = begin + duration

        self.machine_ready[mm] = finish
        self.job_ready[j] = finish
        self.assign[(j, k)] = (mm, begin, finish)

        prev_makespan = self.makespan
        self.makespan = max(self.makespan, finish)
        self.next_op[j] += 1
        self.done_ops += 1

        # Shaped reward: penalize incremental makespan growth plus a small step cost.
        reward = -(self.makespan - prev_makespan) - 0.01
        return self._state(), reward, self.done_ops == self.pb.n_ops_total, {}


# ------------------------------
# DQN Agent
# ------------------------------

class QNet(nn.Module):
    """Small MLP mapping a state vector to one Q-value per action."""

    def __init__(self, state_dim: int, action_dim: int):
        super().__init__()
        hidden = 256
        layers = [
            nn.Linear(state_dim, hidden),
            nn.ReLU(),
            nn.Linear(hidden, hidden),
            nn.ReLU(),
            nn.Linear(hidden, action_dim),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        """Return Q-values with shape (..., action_dim)."""
        return self.net(x)


class DQNAgent:
    """DQN agent with invalid-action masking, experience replay,
    epsilon-greedy exploration, and a target network refreshed either by a
    hard copy (target_tau >= 1) or Polyak averaging (target_tau < 1)."""

    def __init__(
        self,
        state_dim: int,
        action_dim: int,
        lr: float = 1e-3,
        gamma: float = 0.99,
        eps_start: float = 1.0,
        eps_end: float = 0.05,
        eps_decay: float = 0.997,
        buffer_size: int = 150_000,
        batch_size: int = 256,
        target_tau: float = 1.0,
    ):
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # Online network plus an initially identical target copy.
        self.q = QNet(state_dim, action_dim).to(self.device)
        self.q_target = QNet(state_dim, action_dim).to(self.device)
        self.q_target.load_state_dict(self.q.state_dict())
        self.opt = optim.Adam(self.q.parameters(), lr=lr)
        self.gamma = gamma
        self.eps = eps_start
        self.eps_end = eps_end
        self.eps_decay = eps_decay
        self.batch_size = batch_size
        self.target_tau = target_tau
        self.memory = deque(maxlen=buffer_size)

    def act(self, state: np.ndarray, valid_mask: np.ndarray) -> int:
        """Choose an action epsilon-greedily, masking invalid entries.

        In the explore branch a uniformly random *valid* action is drawn
        (falling back to any action if the mask is empty); in the exploit
        branch invalid Q-values are forced to -1e9 before the argmax.
        """
        if np.random.rand() < self.eps:
            candidates = np.nonzero(valid_mask)[0]
            if len(candidates) == 0:
                return np.random.randint(valid_mask.size)
            return int(np.random.choice(candidates))
        with torch.no_grad():
            obs = torch.tensor(state, dtype=torch.float32, device=self.device).unsqueeze(0)
            scores = self.q(obs).squeeze(0).cpu().numpy()
        scores[valid_mask < 0.5] = -1e9
        return int(np.argmax(scores))

    def remember(self, s, a, r, s2, d, mask2):
        """Store one transition, including the next state's action mask."""
        self.memory.append((s, a, r, s2, d, mask2))

    def train_step(self):
        """Run one gradient step on a replay minibatch.

        Returns the loss value, or 0.0 while the buffer is still smaller
        than one batch. Also updates the target network and decays epsilon.
        """
        if len(self.memory) < self.batch_size:
            return 0.0
        sample = random.sample(self.memory, self.batch_size)
        states, actions, rewards, nexts, dones, masks = zip(*sample)
        s = torch.tensor(np.stack(states), dtype=torch.float32, device=self.device)
        a = torch.tensor(actions, dtype=torch.int64, device=self.device).unsqueeze(1)
        r = torch.tensor(rewards, dtype=torch.float32, device=self.device).unsqueeze(1)
        s2 = torch.tensor(np.stack(nexts), dtype=torch.float32, device=self.device)
        d = torch.tensor(dones, dtype=torch.float32, device=self.device).unsqueeze(1)
        mask2 = torch.tensor(np.stack(masks), dtype=torch.float32, device=self.device)

        # Q(s, a) for the actions actually taken.
        q_curr = self.q(s).gather(1, a)
        with torch.no_grad():
            q_next = self.q_target(s2)
            # Invalid next actions must not contribute to the bootstrap target.
            q_next = torch.where(mask2 > 0.5, q_next, torch.full_like(q_next, -1e9))
            best_next = q_next.max(dim=1, keepdim=True).values
            target = r + (1.0 - d) * self.gamma * best_next

        loss = nn.MSELoss()(q_curr, target)
        self.opt.zero_grad()
        loss.backward()
        nn.utils.clip_grad_norm_(self.q.parameters(), 1.0)
        self.opt.step()

        # Target update: hard copy every step, or Polyak soft blend.
        if self.target_tau >= 1.0:
            self.q_target.load_state_dict(self.q.state_dict())
        else:
            for tp, p in zip(self.q_target.parameters(), self.q.parameters()):
                tp.data.copy_(tp.data * (1.0 - self.target_tau) + p.data * self.target_tau)

        # Epsilon decay happens once per gradient step.
        self.eps = max(self.eps_end, self.eps * self.eps_decay)
        return float(loss.item())


# ------------------------------
# Training & Evaluation
# ------------------------------

def train_dqn(env: FJSPEnv, episodes: int = 1500):
    """Train a masked DQN agent on the given environment.

    Returns the trained agent and a history list of
    (episode, total_reward, total_loss, makespan) tuples. Progress is
    printed on the first episode and every 50th thereafter.
    """
    state_dim = env._state().size
    agent = DQNAgent(
        state_dim,
        env.action_dim,
        lr=1e-3,
        gamma=0.99,
        eps_start=1.0,
        eps_end=0.05,
        eps_decay=0.997,
        buffer_size=150000,
        batch_size=256,
        target_tau=1.0,
    )

    history = []
    for ep in range(1, episodes + 1):
        state = env.reset()
        mask = env.available_actions_mask()
        total_reward = 0.0
        total_loss = 0.0
        steps = 0
        done = False
        while not done:
            action = agent.act(state, mask)
            next_state, reward, done, _ = env.step(action)
            next_mask = env.available_actions_mask()
            agent.remember(state, action, reward, next_state, float(done), next_mask)
            total_loss += agent.train_step()
            total_reward += reward
            steps += 1
            state, mask = next_state, next_mask
        history.append((ep, total_reward, total_loss, env.makespan))
        if ep == 1 or ep % 50 == 0:
            print(
                f"Episode {ep:4d} | steps={steps:4d} | reward={total_reward:9.3f} | "
                f"loss={total_loss:9.3f} | makespan={env.makespan:7.2f} | eps={agent.eps:5.3f}"
            )
    return agent, history


def evaluate_policy(env: "FJSPEnv", agent: "DQNAgent", greedy: bool = True):
    """Roll out the agent's current policy for one full episode.

    Args:
        env: environment to evaluate on (it is reset first).
        agent: trained agent; when ``greedy`` is True its epsilon is
            temporarily forced to 0 so actions are purely exploitative.
        greedy: disable exploration during the rollout.

    Returns:
        (assign, makespan, machine_ready) — the schedule dict, final
        makespan, and a copy of the per-machine ready times.

    Note: epsilon is restored in a ``finally`` block, so the agent is left
    unchanged even if the rollout raises.
    """
    prev_eps = agent.eps
    if greedy:
        agent.eps = 0.0
    try:
        s = env.reset()
        mask = env.available_actions_mask()
        done = False
        while not done:
            a = agent.act(s, mask)
            s, _reward, done, _ = env.step(a)
            mask = env.available_actions_mask()
    finally:
        # Restore exploration rate even on error (the original leaked
        # eps=0 if env.step or agent.act raised mid-rollout).
        agent.eps = prev_eps
    return env.assign, env.makespan, env.machine_ready.copy()


def plot_gantt(pb: Problem, assign: Dict[Tuple[int, int], Tuple[int, float, float]], title: str = "Schedule"):
    """Draw a horizontal-bar Gantt chart of the schedule, one row per machine.

    ``assign`` maps (job, op) -> (machine, start, end); each bar is labeled
    "J{job}-O{op}". The chart is shown interactively via plt.show().
    """
    # Bucket bars by machine: (start, duration, label).
    lanes = {mach: [] for mach in range(pb.m)}
    for (job, op), (mach, start, end) in assign.items():
        lanes[mach].append((start, end - start, f"J{job}-O{op}"))
    fig, ax = plt.subplots(figsize=(12, 5))
    ticks = []
    labels = []
    for row, mach in enumerate(sorted(lanes)):
        for start, dur, text in sorted(lanes[mach], key=lambda bar: bar[0]):
            ax.barh(row, dur, left=start)
            ax.text(start + dur / 2, row, text, ha="center", va="center", fontsize=8)
        ticks.append(row)
        labels.append(f"M{mach}")
    ax.set_yticks(ticks)
    ax.set_yticklabels(labels)
    ax.set_xlabel("Time")
    ax.set_title(title)
    plt.tight_layout()
    plt.show()


# ------------------------------
# Main
# ------------------------------

def main():
    """Build the demo instance, train the DQN, then evaluate and plot."""
    # 20 jobs, 4 operations per job, 5 machines
    problem = build_demo_problem(n_jobs=20, m=5, ops_per_job=4, seed=7)
    env = FJSPEnv(problem)
    agent, _history = train_dqn(env, episodes=1500)

    assign, makespan, _ = evaluate_policy(env, agent, greedy=True)
    print(f"Final greedy makespan: {makespan:.2f}")
    for (j, k), (mm, s, e) in sorted(assign.items()):
        print(f"Job {j} Op {k} -> M{mm}, start={s:.1f}, end={e:.1f}")
    plot_gantt(problem, assign, title=f"FJSP DQN schedule (20×4×5, makespan={makespan:.1f})")


# Entry point: run training + evaluation only when executed as a script.
if __name__ == "__main__":
    main()
