#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：V2 
@File    ：DP2.py
@IDE     ：PyCharm 
@Author  ：郭星
@Date    ：2025/9/29 19:56
深度强化学习
'''
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import random
from collections import deque
import matplotlib.pyplot as plt


class FJSPEnvironment:
    """Flexible Job-shop Scheduling Problem (FJSP) environment.

    A random problem instance is generated at construction: each job is a
    sequence of operations, and each operation may run on a subset of the
    machines with machine-dependent processing times. The agent picks
    (job_id, machine_id) actions; the environment tracks machine/job timing
    and returns a flat observation vector.
    """

    def __init__(self, num_jobs=5, num_machines=4, max_operations=3):
        self.num_jobs = num_jobs
        self.num_machines = num_machines
        self.max_operations = max_operations

        # Generate a random problem instance once; reset() only clears state.
        self.jobs = self.generate_problem()
        self.reset()

    def generate_problem(self):
        """Build a random FJSP instance.

        Returns a list (one entry per job) of operation lists; each operation
        is a dict with its id, the machines it can run on, and a per-machine
        processing time in [1, 10].
        """
        jobs = []
        for job_id in range(self.num_jobs):
            job = []
            num_operations = random.randint(2, self.max_operations)
            for op_id in range(num_operations):
                # Each operation can run on a random non-empty subset of
                # machines, each with its own processing time.
                available_machines = random.sample(
                    range(self.num_machines),
                    random.randint(1, self.num_machines)
                )
                processing_times = {
                    machine: random.randint(1, 10)
                    for machine in available_machines
                }
                job.append({
                    'operation_id': op_id,
                    'available_machines': available_machines,
                    'processing_times': processing_times
                })
            jobs.append(job)
        return jobs

    def reset(self):
        """Reset all scheduling state and return the initial observation."""
        # Machine state: time the machine becomes free, and an idle flag.
        # NOTE(review): 'idle' is set False on first use and never restored
        # to True — confirm whether the flag is meant to track availability.
        self.machines = [{'current_time': 0, 'idle': True} for _ in range(self.num_machines)]

        # Per-job progress: next operation index, completion flag, and the
        # start/end times of the most recently scheduled operation.
        self.job_states = [
            {
                'current_operation': 0,
                'completed': False,
                'start_time': 0,
                'end_time': 0
            } for _ in range(self.num_jobs)
        ]

        # Chronological record of scheduled operations.
        self.schedule = []
        self.current_time = 0
        self.steps = 0

        return self.get_state()

    def get_state(self):
        """Return the current observation as a flat float32 vector.

        Layout: 2 features per machine, then 3 features per job, then one
        global time feature — length = 2*num_machines + 3*num_jobs + 1.
        """
        state = []

        # Machine features: normalized busy-until time and idle flag.
        for machine in self.machines:
            state.extend([
                machine['current_time'] / 100.0,  # normalize
                1.0 if machine['idle'] else 0.0
            ])

        # Job features.
        # BUG FIX: the original used self.job_states.index(job_state), which
        # returns the index of the FIRST *equal* dict. Right after reset()
        # every job state is identical, so all jobs were described using
        # job 0's operations. enumerate() gives each job its true index.
        for job_id, job_state in enumerate(self.job_states):
            current_op = job_state['current_operation']

            if job_state['completed']:
                state.extend([0.0, 0.0, 1.0])  # completed marker
            else:
                # Describe the job's next pending operation.
                operation = self.jobs[job_id][current_op]
                avg_processing_time = np.mean(list(operation['processing_times'].values()))

                state.extend([
                    current_op / self.max_operations,  # normalized progress
                    avg_processing_time / 10.0,  # normalized processing time
                    0.0  # not completed
                ])

        # Global time feature.
        state.append(self.current_time / 100.0)

        return np.array(state, dtype=np.float32)

    def step(self, action):
        """Apply action = (job_id, machine_id).

        An invalid action (completed job, or a machine the operation cannot
        run on) is a silent no-op with zero reward. Returns
        (next_state, reward, done, info).
        """
        job_id, machine_id = action

        reward = 0
        done = False

        if not self.job_states[job_id]['completed']:
            current_op_idx = self.job_states[job_id]['current_operation']
            operation = self.jobs[job_id][current_op_idx]

            if machine_id in operation['available_machines']:
                # Operation starts when both the machine and the job's
                # previous operation are finished.
                start_time = max(
                    self.machines[machine_id]['current_time'],
                    self.job_states[job_id]['end_time']
                )

                processing_time = operation['processing_times'][machine_id]
                end_time = start_time + processing_time

                # Update machine occupancy.
                self.machines[machine_id]['current_time'] = end_time
                self.machines[machine_id]['idle'] = False

                # Advance the job to its next operation.
                self.job_states[job_id]['start_time'] = start_time
                self.job_states[job_id]['end_time'] = end_time
                self.job_states[job_id]['current_operation'] += 1

                # Mark the job done once all its operations are scheduled.
                if self.job_states[job_id]['current_operation'] >= len(self.jobs[job_id]):
                    self.job_states[job_id]['completed'] = True

                # Record the scheduling decision.
                self.schedule.append({
                    'job_id': job_id,
                    'operation': current_op_idx,
                    'machine': machine_id,
                    'start_time': start_time,
                    'end_time': end_time
                })

                # Step reward: negative processing time (favor short ops).
                reward = -processing_time / 10.0

                # Advance the global clock.
                self.current_time = max(self.current_time, end_time)

        self.steps += 1

        # Episode ends when every job is complete or after 100 steps.
        all_completed = all(job['completed'] for job in self.job_states)
        if all_completed or self.steps >= 100:
            done = True
            # Terminal reward replaces the step reward: negative makespan.
            makespan = self.current_time
            reward = -makespan / 100.0

        return self.get_state(), reward, done, {}


class DQNNetwork(nn.Module):
    """Deep Q-network: three ReLU hidden layers and a linear output head.

    Maps a state vector of length ``state_size`` to one Q-value per
    discrete action (``action_size`` outputs).
    """

    def __init__(self, state_size, action_size, hidden_size=128):
        super(DQNNetwork, self).__init__()
        # Attribute names are kept stable so checkpoints/state_dicts match.
        self.fc1 = nn.Linear(state_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, hidden_size)
        self.output = nn.Linear(hidden_size, action_size)

        self.relu = nn.ReLU()

    def forward(self, x):
        # Pass through each hidden layer with a ReLU, then the linear head.
        for hidden in (self.fc1, self.fc2, self.fc3):
            x = self.relu(hidden(x))
        return self.output(x)


class DQNAgent:
    """DQN agent: epsilon-greedy action selection over valid actions plus
    experience replay with a single online network.

    Actions are (job_id, machine_id) pairs, flattened to the index
    ``job_id * num_machines + machine_id`` in the Q-value vector.
    """

    def __init__(self, state_size, action_size, num_jobs, num_machines):
        self.state_size = state_size
        self.action_size = action_size
        self.num_jobs = num_jobs
        self.num_machines = num_machines
        self.memory = deque(maxlen=10000)  # replay buffer
        self.gamma = 0.95  # discount factor
        self.epsilon = 1.0  # exploration rate, annealed toward epsilon_min
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.995
        self.learning_rate = 0.001

        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = DQNNetwork(state_size, action_size).to(self.device)
        self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)

    def remember(self, state, action, reward, next_state, done):
        """Store one (s, a, r, s', done) transition in the replay buffer."""
        self.memory.append((state, action, reward, next_state, done))

    def act(self, state, valid_actions):
        """Return a (job_id, machine_id) action, epsilon-greedy over
        ``valid_actions`` only."""
        if np.random.random() <= self.epsilon:
            # Explore: uniform choice among valid actions.
            return random.choice(valid_actions)

        state_t = torch.FloatTensor(state).unsqueeze(0).to(self.device)
        # FIX: run inference under no_grad so no autograd graph is built
        # for a forward pass that is never backpropagated.
        with torch.no_grad():
            q_values = self.model(state_t).cpu().numpy()[0]

        # Exploit: argmax of Q restricted to the valid (job, machine) pairs.
        return max(
            valid_actions,
            key=lambda a: q_values[a[0] * self.num_machines + a[1]]
        )

    def replay(self, batch_size=32):
        """Sample a minibatch and take one gradient step on the TD error.

        No-op until the buffer holds at least ``batch_size`` transitions.
        Anneals epsilon after each training step.
        """
        if len(self.memory) < batch_size:
            return

        batch = random.sample(self.memory, batch_size)

        # Stack into arrays first so tensor creation is a single copy.
        states = torch.FloatTensor(np.array([e[0] for e in batch])).to(self.device)
        actions = torch.LongTensor(
            [e[1][0] * self.num_machines + e[1][1] for e in batch]
        ).to(self.device)
        rewards = torch.FloatTensor([e[2] for e in batch]).to(self.device)
        next_states = torch.FloatTensor(np.array([e[3] for e in batch])).to(self.device)
        dones = torch.BoolTensor([e[4] for e in batch]).to(self.device)

        # FIX: squeeze(1) instead of squeeze() — a batch of size 1 would
        # otherwise collapse to a 0-d tensor and mis-shape the MSE loss.
        current_q = self.model(states).gather(1, actions.unsqueeze(1)).squeeze(1)
        # NOTE(review): bootstraps from the online network (no separate
        # target network) — standard DQN would use a frozen target net.
        next_q = self.model(next_states).max(1)[0].detach()
        # Zero out the bootstrap term on terminal transitions.
        target_q = rewards + (self.gamma * next_q * ~dones)

        loss = nn.MSELoss()(current_q, target_q)

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        # Decay exploration after each learning step.
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay


# Training loop
def train_fjsp_dqn():
    """Train a DQN agent on a randomly generated FJSP instance.

    Runs 500 episodes, replaying experience once per episode, then plots
    the score curve and prints the last episode's schedule and makespan.
    Returns the (environment, agent) pair.
    """
    env = FJSPEnvironment(num_jobs=5, num_machines=4)
    state_dim = len(env.get_state())
    n_actions = env.num_jobs * env.num_machines

    agent = DQNAgent(state_dim, n_actions, env.num_jobs, env.num_machines)
    num_episodes = 500
    scores = []

    for episode in range(num_episodes):
        obs = env.reset()
        episode_reward = 0
        finished = False

        while not finished:
            # Valid actions: every (unfinished job, machine its next
            # operation can run on) pair.
            candidates = [
                (job_id, machine_id)
                for job_id in range(env.num_jobs)
                if not env.job_states[job_id]['completed']
                and env.job_states[job_id]['current_operation'] < len(env.jobs[job_id])
                for machine_id in env.jobs[job_id][
                    env.job_states[job_id]['current_operation']
                ]['available_machines']
            ]

            if not candidates:
                break

            chosen = agent.act(obs, candidates)
            next_obs, reward, finished, _ = env.step(chosen)

            agent.remember(obs, chosen, reward, next_obs, finished)
            obs = next_obs
            episode_reward += reward

        agent.replay()
        scores.append(episode_reward)

        if episode % 50 == 0:
            print(f"Episode {episode}, Score: {episode_reward:.2f}, Epsilon: {agent.epsilon:.2f}")

    # Plot the learning curve.
    plt.figure(figsize=(10, 6))
    plt.plot(scores)
    plt.title('DQN Training Performance for FJSP')
    plt.xlabel('Episode')
    plt.ylabel('Score')
    plt.grid(True)
    plt.show()

    # Print the schedule produced in the final episode.
    print("\nFinal Schedule:")
    for entry in env.schedule:
        print(f"Job {entry['job_id']}, Operation {entry['operation']}, "
              f"Machine {entry['machine']}, Time: {entry['start_time']}-{entry['end_time']}")

    print(f"\nTotal Makespan: {env.current_time}")

    return env, agent


# Run training when executed as a script.
if __name__ == "__main__":
    env, agent = train_fjsp_dqn()