#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：V2 
@File    ：t2.py
@IDE     ：PyCharm 
@Author  ：郭星
@Date    ：2025/9/8 16:19
DQN求解FJSP并给出每一步
'''
import numpy as np
import random
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from collections import deque
from typing import List, Tuple


# 1. 神经网络模型（深度Q网络）
class DQN(nn.Module):
    """Fully connected Q-network mapping a flat state vector to per-action Q-values."""

    def __init__(self, state_dim, action_dim):
        super(DQN, self).__init__()
        # Two hidden layers (128 -> 64), ReLU activations, linear output head.
        hidden1, hidden2 = 128, 64
        self.fc1 = nn.Linear(state_dim, hidden1)
        self.fc2 = nn.Linear(hidden1, hidden2)
        self.fc3 = nn.Linear(hidden2, action_dim)

    def forward(self, state):
        """Return one Q-value estimate per action for the given state."""
        hidden = torch.relu(self.fc2(torch.relu(self.fc1(state))))
        return self.fc3(hidden)


# 2. 环境类（FJSP问题模拟）
class FJSPEnvironment:
    """Simplified FJSP environment where each action assigns one (job, machine) pair."""

    def __init__(self, n_jobs: int, n_machines: int, max_steps: int):
        self.n_jobs = n_jobs
        self.n_machines = n_machines
        self.max_steps = max_steps  # hard cap on episode length
        # Processing time of each job on each machine, drawn uniformly from [1, 9].
        self.job_durations = np.random.randint(1, 10, size=(n_jobs, n_machines))
        self.reset()

    def reset(self):
        """Reset all episode state and return the initial flattened observation."""
        self.current_step = 0
        # Per (job, machine) pair: 0 = not started, 1 = started.
        self.job_status = np.zeros((self.n_jobs, self.n_machines))
        # Per machine: 0 = idle, 1 = busy.
        # NOTE(review): machines are never released back to idle within an
        # episode — only reset() clears them. Confirm this is intended.
        self.machine_status = np.zeros(self.n_machines)
        return self.job_status.flatten()

    def step(self, action: int):
        """Apply *action* (encoded as job_id * n_machines + machine_id).

        Returns (flattened job status, reward, done). Reward is the negative
        processing time on success, or -10 for an infeasible assignment.
        """
        job_id, machine_id = divmod(action, self.n_machines)
        schedulable = (
            self.job_status[job_id, machine_id] == 0
            and self.machine_status[machine_id] == 0
        )
        if schedulable:
            self.job_status[job_id, machine_id] = 1
            self.machine_status[machine_id] = 1
            # Cost of the assignment equals its processing time.
            reward = -self.job_durations[job_id, machine_id]
        else:
            reward = -10  # penalty for an invalid action
        self.current_step += 1
        done = self.current_step >= self.max_steps
        return self.job_status.flatten(), reward, done

    def render(self):
        """Print the current job and machine status."""
        print("Job Status:\n", self.job_status)
        print("Machine Status:", self.machine_status)


# 3. DQN训练类
class DQNAgent:
    """DQN agent with replay buffer, target network, and epsilon-greedy policy."""

    def __init__(self, state_dim, action_dim):
        self.action_dim = action_dim
        self.memory = deque(maxlen=10000)  # replay buffer of (s, a, r, s', done)
        self.model = DQN(state_dim, action_dim)         # online network
        self.target_model = DQN(state_dim, action_dim)  # target network for TD targets
        self.update_target_model()
        self.optimizer = optim.Adam(self.model.parameters(), lr=0.001)
        self.gamma = 0.99          # reward discount factor
        self.epsilon = 1.0         # exploration rate
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.995
        self.batch_size = 32

    def update_target_model(self):
        """Copy the online network's weights into the target network."""
        self.target_model.load_state_dict(self.model.state_dict())

    def act(self, state):
        """Select an action with an epsilon-greedy policy over Q-values."""
        if np.random.rand() <= self.epsilon:
            return random.randint(0, self.action_dim - 1)  # explore: random action
        state_tensor = torch.tensor(state, dtype=torch.float32)
        with torch.no_grad():  # inference only — no gradient bookkeeping
            q_values = self.model(state_tensor)
        return torch.argmax(q_values).item()

    def remember(self, state, action, reward, next_state, done):
        """Store one transition in the replay buffer."""
        self.memory.append((state, action, reward, next_state, done))

    def replay(self):
        """Train the online network on a random minibatch from the buffer.

        Fixes over the previous implementation:
        * the TD target is computed under ``torch.no_grad()`` so no gradient
          leaks into the target network;
        * the loss is taken only on the Q-value of the action actually
          executed, instead of writing the target into the network's output
          in place (an in-place modification of a tensor that requires grad)
          and comparing it against a second forward pass.
        """
        if len(self.memory) < self.batch_size:
            return
        minibatch = random.sample(self.memory, self.batch_size)
        for state, action, reward, next_state, done in minibatch:
            state_tensor = torch.tensor(state, dtype=torch.float32)
            next_state_tensor = torch.tensor(next_state, dtype=torch.float32)
            # Bootstrapped TD target (a plain float, detached from the graph).
            target = float(reward)
            if not done:
                with torch.no_grad():
                    target += self.gamma * torch.max(self.target_model(next_state_tensor)).item()
            # Q-value of the action that was actually taken.
            q_value = self.model(state_tensor)[action]
            loss = (q_value - target) ** 2

            # Per-sample gradient step.
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

        # Decay exploration once per training call.
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay


# 4. 主训练过程
def train_agent():
    """Train a DQN agent on a randomly generated FJSP instance and log progress."""
    env = FJSPEnvironment(n_jobs=5, n_machines=3, max_steps=100)
    # State is the flattened job-status matrix; one action per (job, machine) pair,
    # so both dimensions equal n_jobs * n_machines.
    n_actions = env.n_jobs * env.n_machines
    agent = DQNAgent(n_actions, n_actions)

    episodes = 100
    for episode in range(episodes):
        state = env.reset()
        total_reward = 0
        for _ in range(env.max_steps):
            chosen = agent.act(state)
            next_state, reward, done = env.step(chosen)
            agent.remember(state, chosen, reward, next_state, done)
            agent.replay()  # one training pass per environment step
            state = next_state
            total_reward += reward
            if done:
                break
        # Sync the target network at the end of every episode.
        agent.update_target_model()
        print(f"Episode {episode + 1}/{episodes}, Total Reward: {total_reward}")

        # NOTE(review): with episodes=100 this only triggers on the first
        # episode (episode == 0) — confirm the intended render cadence.
        if episode % 100 == 0:
            env.render()

    print("Training completed.")


# 5. 测试
# Entry point: run the full training loop when executed as a script.
if __name__ == "__main__":
    train_agent()  # train the agent