#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：V2 
@File    ：DT1.py
@IDE     ：PyCharm 
@Author  ：郭星
@Date    ：2025/9/10 18:41 
'''
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Categorical
import matplotlib.pyplot as plt
import random
from collections import deque

# Fix all random seeds (numpy, torch, stdlib random) so runs are reproducible.
seed = 42
np.random.seed(seed)
torch.manual_seed(seed)
random.seed(seed)


class FJSPEnvironment:
    """Flexible job-shop scheduling (FJSP) environment.

    Each job consists of an ordered sequence of operations. Every operation
    can be processed on a random subset of machines, each with a random
    integer processing time in [1, 10]. An action schedules one operation on
    one machine; the episode ends when all operations are scheduled.
    """

    def __init__(self, num_jobs, num_machines, max_operations=5):
        self.num_jobs = num_jobs  # number of jobs
        self.num_machines = num_machines  # number of machines
        self.max_operations = max_operations  # maximum operations per job

        # Random number of operations per job, in [1, max_operations].
        self.operations_per_job = np.random.randint(1, max_operations + 1, size=num_jobs)
        self.total_operations = sum(self.operations_per_job)

        # Eligible machines and processing times per operation:
        self.processing_times = {}  # (job, op) -> {machine: time}
        self._init_processing_times()

        self.reset()

    def _init_processing_times(self):
        """Randomly assign each operation its eligible machines and times."""
        for job in range(self.num_jobs):
            for op in range(self.operations_per_job[job]):
                # Each operation can run on 1..num_machines distinct machines.
                num_possible_machines = np.random.randint(1, self.num_machines + 1)
                possible_machines = np.random.choice(
                    self.num_machines, num_possible_machines, replace=False)

                times = {}
                for machine in possible_machines:
                    # Processing time: random integer in [1, 10].
                    times[machine] = np.random.randint(1, 11)

                self.processing_times[(job, op)] = times

    def reset(self):
        """Reset to an empty schedule and return the initial state tuple."""
        # Index of the next unscheduled operation for each job.
        self.current_operation = np.zeros(self.num_jobs, dtype=int)

        # Earliest time each machine becomes free.
        self.machine_available_time = np.zeros(self.num_machines)

        # Completion time of each job's last scheduled operation.
        self.job_completion_time = np.zeros(self.num_jobs)

        # Number of operations scheduled so far.
        self.scheduled_operations = 0

        # Scheduled entries: (job, op, machine, start_time, end_time).
        self.schedule = []

        return self._get_state()

    def _get_state(self):
        """Return (state_vector, list of currently schedulable (job, op) pairs)."""
        # Next pending operation of every unfinished job.
        next_operations = []
        for job in range(self.num_jobs):
            op = self.current_operation[job]
            if op < self.operations_per_job[job]:
                next_operations.append((job, op))

        # Normalize machine availability times.
        # NOTE(review): 10 * max_operations is only a rough per-job estimate;
        # the true makespan can exceed it, so normalized values may go above 1.
        max_possible_time = 10 * self.max_operations  # estimated upper bound on time
        normalized_machine_time = self.machine_available_time / max_possible_time

        # Fraction of each job's operations already scheduled.
        job_progress = [self.current_operation[j] / self.operations_per_job[j]
                        for j in range(self.num_jobs)]

        # State vector: machine availability followed by job progress.
        state = np.concatenate([normalized_machine_time, job_progress])

        return state, next_operations

    def step(self, action):
        """Schedule ``action = (job, op, machine)``.

        Returns ((state, next_operations), reward, done). An invalid action
        (out-of-order operation or ineligible machine) is rejected with a
        -100 penalty and no state change.
        """
        job, op, machine = action

        # Validate the action.
        if op != self.current_operation[job]:
            # Wrong operation index for this job (out of order).
            return self._get_state(), -100, False

        if machine not in self.processing_times[(job, op)]:
            # The chosen machine cannot process this operation.
            return self._get_state(), -100, False

        # Start when both the machine and the job's previous op are finished.
        processing_time = self.processing_times[(job, op)][machine]
        start_time = max(self.machine_available_time[machine], self.job_completion_time[job])
        end_time = start_time + processing_time

        # Machine is busy until this operation completes.
        self.machine_available_time[machine] = end_time

        # The job cannot continue before this operation completes.
        self.job_completion_time[job] = end_time

        # Advance the job's operation pointer.
        self.current_operation[job] += 1

        # Record the scheduling decision.
        self.schedule.append((job, op, machine, start_time, end_time))

        # One more operation scheduled.
        self.scheduled_operations += 1

        # Episode ends once every operation has been scheduled.
        done = (self.scheduled_operations == self.total_operations)

        # Reward is inversely proportional to the current makespan.
        current_makespan = np.max(self.job_completion_time)
        if done:
            # Larger terminal reward on completion.
            reward = 1000.0 / (current_makespan + 1e-6)
        else:
            # Smaller shaping reward for intermediate steps.
            reward = 10.0 / (current_makespan + 1e-6)

        return self._get_state(), reward, done

    def get_makespan(self):
        """Return the maximum completion time over all jobs."""
        return np.max(self.job_completion_time)


class PolicyNetwork(nn.Module):
    """Small MLP that scores a state(-action) feature vector.

    Two hidden ReLU layers of ``hidden_dim`` units feed a single linear
    output unit; the caller turns the per-candidate scores into a softmax
    distribution over actions.
    """

    def __init__(self, state_dim, hidden_dim=64):
        super(PolicyNetwork, self).__init__()
        self.fc1 = nn.Linear(state_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, 1)  # single scalar score per input

    def forward(self, x):
        # Pass through both hidden layers with ReLU, then the linear head.
        for hidden_layer in (self.fc1, self.fc2):
            x = F.relu(hidden_layer(x))
        return self.fc3(x)


class FJSPAgent:
    """REINFORCE-style policy-gradient agent for the FJSP environment.

    Stores per-episode log-probabilities and rewards, then applies one
    policy-gradient update per episode via ``update_policy``.
    """

    def __init__(self, state_dim, learning_rate=1e-3, gamma=0.99):
        # The network scores (state, action) pairs: select_action concatenates
        # the state vector with 3 action features (job, op, machine), so the
        # input width is state_dim + 3. (Using plain state_dim here crashed
        # on the first forward pass with a shape mismatch.)
        self.policy_net = PolicyNetwork(state_dim + 3)
        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=learning_rate)
        self.gamma = gamma  # discount factor
        self.saved_log_probs = []  # log-probs of sampled actions (current episode)
        self.rewards = []  # per-step rewards (current episode)

    def select_action(self, state, possible_actions):
        """Sample a (job, op, machine) action from the policy.

        ``possible_actions`` is a list of schedulable (job, op) pairs; every
        eligible machine for each pair becomes one candidate. Returns None
        when there is nothing to schedule.
        """
        if not possible_actions:
            return None

        # Score every (operation, machine) candidate with the policy net.
        candidates = []  # (job, op, machine), aligned with `scores`
        scores = []
        for job, op in possible_actions:
            # NOTE(review): relies on the module-level global `env` for the
            # machine lookup — only works when this file is run as a script
            # with `env` defined at top level; consider passing env in.
            for machine in env.processing_times[(job, op)]:
                # Simple hand-crafted features: state plus scaled action ids.
                feat = np.concatenate([state, [job / 10, op / 10, machine / 10]])
                feat_tensor = torch.from_numpy(feat).float().unsqueeze(0)
                scores.append(self.policy_net(feat_tensor).view(-1))
                candidates.append((job, op, machine))

        # Concatenate the raw network outputs so the computation graph is
        # preserved. (Copying them into a fresh torch.tensor would detach
        # gradients and make the policy update a silent no-op.)
        score_vec = torch.cat(scores)
        probs = F.softmax(score_vec, dim=0)

        # Sample an action and remember its log-probability for the update.
        m = Categorical(probs)
        idx = m.sample()
        self.saved_log_probs.append(m.log_prob(idx))

        return candidates[idx.item()]

    def update_policy(self):
        """Apply one REINFORCE update from the buffered episode.

        Returns the scalar policy loss (0.0 if no steps were recorded).
        """
        # Nothing to learn from an empty episode; avoid stacking zero tensors.
        if not self.saved_log_probs:
            del self.rewards[:]
            return 0.0

        # Discounted returns, computed back-to-front.
        R = 0
        returns = []
        for r in reversed(self.rewards):
            R = r + self.gamma * R
            returns.insert(0, R)

        returns = torch.tensor(returns)
        # Normalize returns for stability; std() of a single element is nan,
        # so skip normalization for one-step episodes.
        if len(returns) > 1:
            returns = (returns - returns.mean()) / (returns.std() + 1e-6)

        # REINFORCE loss: -log_prob * return, summed over the episode.
        policy_loss = [-log_prob * ret
                       for log_prob, ret in zip(self.saved_log_probs, returns)]

        self.optimizer.zero_grad()
        # log-probs are 0-dim tensors: stack them (cat raises on 0-dim).
        total_loss = torch.stack(policy_loss).sum()
        total_loss.backward()
        self.optimizer.step()

        # Clear the episode buffers in place.
        del self.saved_log_probs[:]
        del self.rewards[:]

        return total_loss.item()


def generate_possible_actions(env, next_operations):
    """Enumerate every (job, op, machine) triple currently schedulable."""
    return [
        (job, op, machine)
        for job, op in next_operations
        for machine in env.processing_times[(job, op)]
    ]


def train_agent(env, agent, episodes=1000, print_interval=100):
    """Train the agent for ``episodes`` episodes.

    Runs one full schedule per episode, buffers rewards on the agent, and
    calls ``agent.update_policy()`` at episode end. Returns
    ``(makespan_history, loss_history)``, one entry per episode.

    (Removed dead code from the original: an unused
    ``generate_possible_actions`` call and redundant state threading — the
    loop already refreshes the state from ``env._get_state()`` each step.)
    """
    makespan_history = []
    loss_history = []

    for episode in range(episodes):
        env.reset()
        done = False
        total_reward = 0

        while not done:
            # Current state and the operations that can be scheduled next.
            state, next_operations = env._get_state()

            if not next_operations:
                break

            # Sample a (job, op, machine) action from the policy.
            action = agent.select_action(state, next_operations)

            if action is None:
                break

            # Apply the action and buffer the reward for the policy update.
            _, reward, done = env.step(action)
            agent.rewards.append(reward)
            total_reward += reward

        # One policy-gradient update per episode.
        loss = agent.update_policy()

        # Track makespan and loss for plotting.
        makespan = env.get_makespan()
        makespan_history.append(makespan)
        loss_history.append(loss)

        if episode % print_interval == 0:
            print(
                f"Episode {episode}/{episodes}, Makespan: {makespan:.2f}, Total Reward: {total_reward:.2f}, Loss: {loss:.4f}")

    return makespan_history, loss_history


def evaluate_agent(env, agent, num_episodes=10):
    """Evaluate the agent and return ``(avg_makespan, makespans)``.

    Runs ``num_episodes`` full schedules without gradient tracking.
    NOTE: ``select_action`` still *samples* from the policy, so evaluation
    is stochastic (the original comment claiming deterministic argmax
    selection was wrong).
    """
    makespans = []

    for _ in range(num_episodes):
        env.reset()
        done = False

        while not done:
            state, next_operations = env._get_state()

            if not next_operations:
                break

            # No gradients needed during evaluation.
            with torch.no_grad():
                action = agent.select_action(state, next_operations)

            if action is None:
                break

            _, _, done = env.step(action)

        makespans.append(env.get_makespan())

    # select_action records log-probs even during evaluation; drop them so
    # stale no-grad entries cannot pollute a later update_policy() call.
    del agent.saved_log_probs[:]

    avg_makespan = np.mean(makespans)
    print(f"\nEvaluation - Average Makespan: {avg_makespan:.2f} (over {num_episodes} episodes)")
    return avg_makespan, makespans


def plot_results(makespan_history, loss_history):
    """Show the makespan and loss training curves, stacked vertically."""
    fig, axes = plt.subplots(2, 1, figsize=(12, 10))

    # (data, title, y-label) for each of the two panels, top to bottom.
    panels = [
        (makespan_history, 'Makespan over Episodes', 'Makespan'),
        (loss_history, 'Loss over Episodes', 'Loss'),
    ]
    for ax, (series, title, ylabel) in zip(axes, panels):
        ax.plot(series)
        ax.set_title(title)
        ax.set_xlabel('Episode')
        ax.set_ylabel(ylabel)
        ax.grid(True)

    plt.tight_layout()
    plt.show()


if __name__ == "__main__":
    # Problem size.
    num_jobs = 5
    num_machines = 3
    max_operations = 3

    # Build the scheduling environment.
    env = FJSPEnvironment(num_jobs, num_machines, max_operations)

    # State dimension: machine availability (num_machines) + job progress (num_jobs).
    state_dim = num_machines + num_jobs

    # Create the agent.
    agent = FJSPAgent(state_dim, learning_rate=1e-3, gamma=0.95)

    # Train the agent (note argument order: env first, then agent).
    print("Starting training...")
    makespan_history, loss_history = train_agent(env, agent, episodes=500, print_interval=50)

    # Plot the training curves.
    plot_results(makespan_history, loss_history)

    # Evaluate the trained agent.
    avg_makespan, makespans = evaluate_agent(env, agent, num_episodes=10)

    # Roll out and print one final schedule from the trained policy.
    print("\nFinal schedule example:")
    env.reset()
    done = False
    while not done:
        state, next_operations = env._get_state()
        if not next_operations:
            break
        with torch.no_grad():
            action = agent.select_action(state, next_operations)
        if action is None:
            break
        (next_state, next_ops), _, done = env.step(action)

    for entry in env.schedule:
        job, op, machine, start, end = entry
        print(f"Job {job}, Operation {op} on Machine {machine}: Start={start}, End={end}")

    print(f"\nFinal Makespan: {env.get_makespan()}")
