#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：V2 
@File    ：t3.py
@IDE     ：PyCharm 
@Author  ：郭星
@Date    ：2025/9/8 20:12 
'''
import numpy as np
import torch
import random
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from collections import deque


# 1. Neural Network Model (Deep Q Network)
class DQN(nn.Module):
    """Three-layer fully-connected Q-network: state vector -> one Q-value per action."""

    def __init__(self, state_dim, action_dim):
        super(DQN, self).__init__()
        # Hidden widths 128 -> 64, then one output unit per discrete action.
        self.fc1 = nn.Linear(state_dim, 128)
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, action_dim)

    def forward(self, state):
        hidden = torch.relu(self.fc1(state))
        hidden = torch.relu(self.fc2(hidden))
        return self.fc3(hidden)


# 2. Environment Class (FJSP Problem Simulation)
class FJSPEnvironment:
    """Toy FJSP (flexible job-shop scheduling) environment for RL training.

    Each discrete action encodes a (job, machine) pair via
    ``action = job_id * n_machines + machine_id``; the machine index doubles
    as the operation index, i.e. ``n_operations == n_machines``.  This matches
    the training script, which sizes both the state and the action space as
    ``n_jobs * n_machines``.  NOTE(review): that identification is an
    assumption carried over from the caller — confirm against the intended
    problem definition.
    """

    def __init__(self, n_jobs: int, n_machines: int, job_durations: np.ndarray = None,
                 max_steps: int = 100):
        """Set up the instance.

        Args:
            n_jobs: number of jobs to schedule.
            n_machines: number of machines (also the number of operation slots).
            job_durations: processing times of shape
                (n_jobs, n_operations, n_machines).  If None, random times in
                [1, 10) are generated so the environment is usable standalone.
            max_steps: episode length; ``step`` reports done after this many calls.
        """
        self.n_jobs = n_jobs
        self.n_machines = n_machines
        # One operation slot per machine keeps the flattened state at
        # n_jobs * n_machines, consistent with the training script.
        # (The original code referenced self.n_operations without defining it,
        # which made reset() raise AttributeError.)
        self.n_operations = n_machines
        if job_durations is None:
            job_durations = np.random.uniform(
                1.0, 10.0, size=(n_jobs, self.n_operations, n_machines))
        self.job_durations = job_durations
        self.max_steps = max_steps
        self.reset()

    def reset(self):
        """Reset to an empty schedule and return the flattened job status."""
        self.current_step = 0
        # Job status: 0 means not started, 1 means started/completed.
        self.job_status = np.zeros((self.n_jobs, self.n_operations))
        # Machine status: 0 means idle, 1 means busy.
        self.machine_status = np.zeros(self.n_machines)
        return self.job_status.flatten()

    def step(self, action: int):
        """Execute one scheduling action.

        Args:
            action: encoded (job, machine) pair; see class docstring.

        Returns:
            (next_state, reward, done) — flattened job status, the (negative)
            processing time or a -10 penalty, and the episode-end flag.
        """
        # Decode consistently (the original mixed n_machines and n_operations
        # divisors, which could index machines out of range).
        job_id = action // self.n_machines
        machine_id = action % self.n_machines
        operation_id = machine_id  # one operation slot per machine

        # The action is feasible only if the operation has not started and the
        # machine is idle.
        if self.job_status[job_id, operation_id] == 0 and self.machine_status[machine_id] == 0:
            self.job_status[job_id, operation_id] = 1  # Mark operation as started
            self.machine_status[machine_id] = 1  # Mark machine as busy
            # Negative processing time: shorter operations earn higher reward.
            reward = -self.job_durations[job_id, operation_id, machine_id]
        else:
            reward = -10  # Penalty for an infeasible action

        self.current_step += 1
        done = self.current_step >= self.max_steps
        return self.job_status.flatten(), reward, done

    def render(self):
        """Print a snapshot of the current schedule state (called by the trainer)."""
        print(f"step={self.current_step}\njob_status=\n{self.job_status}\n"
              f"machine_status={self.machine_status}")


# 3. DQN Training Class
class DQNAgent:
    """DQN agent with epsilon-greedy exploration, experience replay, and a
    periodically synchronized target network."""

    def __init__(self, state_dim, action_dim):
        self.action_dim = action_dim
        self.memory = deque(maxlen=10000)  # Replay buffer of transitions
        self.model = DQN(state_dim, action_dim)          # Online network (trained)
        self.target_model = DQN(state_dim, action_dim)   # Target network (frozen copy)
        self.update_target_model()
        self.optimizer = optim.Adam(self.model.parameters(), lr=0.001)
        self.gamma = 0.99  # Reward discount factor
        self.epsilon = 1.0  # Exploration rate
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.995
        self.batch_size = 32

    def update_target_model(self):
        """Copy the online network's weights into the target network."""
        self.target_model.load_state_dict(self.model.state_dict())

    def act(self, state):
        """Choose an action with an epsilon-greedy strategy.

        Args:
            state: flat state vector (anything torch.tensor accepts).

        Returns:
            Integer action index in [0, action_dim).
        """
        if np.random.rand() <= self.epsilon:
            return random.randint(0, self.action_dim - 1)  # Explore: random action
        state_tensor = torch.tensor(state, dtype=torch.float32)
        with torch.no_grad():  # Pure inference; no gradients needed
            q_values = self.model(state_tensor)
        return torch.argmax(q_values).item()

    def remember(self, state, action, reward, next_state, done):
        """Store one (s, a, r, s', done) transition in the replay buffer."""
        self.memory.append((state, action, reward, next_state, done))

    def replay(self):
        """Sample a minibatch and take one gradient step per transition.

        Fix vs. the original: the TD target is computed under ``no_grad`` and
        the loss is taken only on the Q-value of the action actually played.
        The original mutated the online network's output in place and compared
        it against a second forward pass, leaking gradients through the copied
        target vector (and into the target network).
        """
        if len(self.memory) < self.batch_size:
            return
        minibatch = random.sample(self.memory, self.batch_size)
        for state, action, reward, next_state, done in minibatch:
            state_tensor = torch.tensor(state, dtype=torch.float32)
            next_state_tensor = torch.tensor(next_state, dtype=torch.float32)
            # TD target from the frozen target network; must not receive grads.
            with torch.no_grad():
                target = float(reward)
                if not done:
                    target = reward + self.gamma * torch.max(
                        self.target_model(next_state_tensor)).item()
            # Loss on the chosen action's Q-value only.
            q_value = self.model(state_tensor)[action]
            loss = (q_value - target) ** 2

            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

        # Anneal exploration once per replay call.
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay


# 4. Main Training Process
def train_agent(episodes: int = 1000):
    """Train a DQN agent on a randomly generated FJSP instance.

    Args:
        episodes: number of training episodes (default preserves the original
            behavior of 1000 episodes).

    Fixes vs. the original: the environment was constructed without the
    required ``job_durations`` argument (TypeError), and ``env.render()`` was
    called even though the environment may not define it.
    """
    n_jobs, n_machines = 5, 3
    # Random processing times, shape (jobs, operations, machines); one
    # operation slot per machine keeps state_dim == n_jobs * n_machines.
    job_durations = np.random.uniform(1.0, 10.0, size=(n_jobs, n_machines, n_machines))
    env = FJSPEnvironment(n_jobs=n_jobs, n_machines=n_machines,
                          job_durations=job_durations, max_steps=100)
    state_dim = env.n_jobs * env.n_machines   # Flattened job-status vector
    action_dim = env.n_jobs * env.n_machines  # One action per (job, machine) pair

    agent = DQNAgent(state_dim, action_dim)
    for e in range(episodes):
        state = env.reset()  # Fresh schedule each episode
        total_reward = 0
        for _ in range(env.max_steps):
            action = agent.act(state)
            next_state, reward, done = env.step(action)
            agent.remember(state, action, reward, next_state, done)
            agent.replay()  # One training pass over a sampled minibatch
            state = next_state
            total_reward += reward
            if done:
                break
        agent.update_target_model()  # Sync target network once per episode
        print(f"Episode {e + 1}/{episodes}, Total Reward: {total_reward}")

        # Show progress every 100 episodes; guard because render() may be absent.
        if e % 100 == 0 and hasattr(env, "render"):
            env.render()

    print("Training completed.")


# 5. Testing
# Script entry point: run the full training loop only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    train_agent()  # Train the agent
