"""
增强版强化学习算法实现模块

强化学习是一种通过与环境交互学习最优策略的机器学习方法。
在排班问题中，我们使用Q学习算法来学习最优的排班策略。

增强版强化学习算法包含以下改进：
1. 经验回放机制
2. 优先级经验回放
3. 双Q网络
4. 探索-利用平衡策略
5. 奖励塑形
"""

import random
import numpy as np
import logging
from datetime import datetime
import copy
from collections import deque


class EnhancedRLOptimizer:
    """Enhanced tabular Q-learning optimizer.

    Learns a scheduling (or similar combinatorial) policy by interacting with a
    caller-supplied environment. Improvements over vanilla Q-learning:

    1. Experience replay
    2. Prioritized experience replay (sampling proportional to |TD error|)
    3. Double Q-tables (online table selects the action, a periodically
       synced target table evaluates it)
    4. Decaying epsilon-greedy exploration
    5. Reward shaping is delegated to the caller via ``get_reward_func``
    """

    def __init__(
        self,
        learning_rate=0.1,
        discount_factor=0.9,
        exploration_rate=0.3,
        exploration_decay=0.995,
        min_exploration_rate=0.01,
        max_episodes=500,
        memory_size=1000,
        batch_size=32,
        update_target_freq=10,
        use_double_q=True,
        use_prioritized_replay=True,
    ):
        """Initialize the enhanced RL optimizer.

        Args:
            learning_rate: Step size of the TD update.
            discount_factor: Gamma; weight given to future rewards.
            exploration_rate: Initial epsilon for epsilon-greedy exploration.
            exploration_decay: Per-episode multiplicative epsilon decay.
            min_exploration_rate: Floor below which epsilon never decays.
            max_episodes: Number of training episodes.
            memory_size: Capacity of the replay buffer.
            batch_size: Transitions sampled per learning step.
            update_target_freq: Sync the target Q-table every N steps.
            use_double_q: If True, evaluate actions with the target Q-table.
            use_prioritized_replay: If True, sample by |TD error| priority.
        """
        self.learning_rate = learning_rate
        self.discount_factor = discount_factor
        self.exploration_rate = exploration_rate
        self.exploration_decay = exploration_decay
        self.min_exploration_rate = min_exploration_rate
        self.max_episodes = max_episodes
        self.memory_size = memory_size
        self.batch_size = batch_size
        self.update_target_freq = update_target_freq
        self.use_double_q = use_double_q
        self.use_prioritized_replay = use_prioritized_replay

        # Replay buffer of (state, action, reward, next_state, done) tuples.
        self.memory = deque(maxlen=memory_size)

        # Sampling priorities, kept index-aligned with self.memory (both
        # deques share the same maxlen, so they evict in lockstep).
        self.priorities = deque(maxlen=memory_size)

        # Online Q-table: {state_key: {action: q_value}}.
        self.q_table = {}

        # Target Q-table used for double-Q action evaluation.
        self.target_q_table = {}

        self.logger = logging.getLogger(__name__)

    def optimize(
        self,
        initial_solution,
        get_state_func,
        get_actions_func,
        take_action_func,
        get_reward_func,
        is_terminal_func,
        context=None,
    ):
        """Optimize a solution with enhanced Q-learning.

        Args:
            initial_solution: Starting solution; each episode restarts from it.
            get_state_func: Maps a solution to a state representation.
            get_actions_func: Maps a state to a list of available actions.
            take_action_func: ``(solution, action, context) -> new solution``.
            get_reward_func: Maps a solution to a scalar reward.
            is_terminal_func: Maps a state to True when the episode is over.
            context: Opaque extra data forwarded to ``take_action_func``.

        Returns:
            The best solution found across all episodes (never worse than
            ``initial_solution``, which seeds the incumbent).
        """
        self.logger.info("开始使用增强版强化学习算法优化解决方案...")
        start_time = datetime.now()

        # Seed the incumbent with the initial solution so we can only improve.
        best_solution = copy.deepcopy(initial_solution)
        best_reward = get_reward_func(best_solution)

        for episode in range(self.max_episodes):
            # Reset the environment to the initial solution.
            current_solution = copy.deepcopy(initial_solution)
            current_state = get_state_func(current_solution)

            total_reward = 0

            # Episode loop, capped at 100 steps to bound runtime.
            step = 0
            while not is_terminal_func(current_state) and step < 100:
                available_actions = get_actions_func(current_state)
                # A dead-end state offers no actions; end the episode early
                # instead of crashing random.choice()/max() on an empty list.
                if not available_actions:
                    break

                # Epsilon-greedy exploration/exploitation balance.
                if random.random() < self.exploration_rate:
                    # Explore: pick a uniformly random action.
                    action = random.choice(available_actions)
                else:
                    # Exploit: pick the action with the highest Q-value,
                    # lazily initializing unseen states to all-zero Q-values.
                    state_key = self._get_state_key(current_state)
                    if state_key not in self.q_table:
                        self.q_table[state_key] = {a: 0.0 for a in available_actions}

                    action = max(
                        available_actions,
                        key=lambda a: self.q_table[state_key].get(a, 0.0),
                    )

                # Apply the action and observe the transition.
                next_solution = take_action_func(current_solution, action, context)
                next_state = get_state_func(next_solution)

                reward = get_reward_func(next_solution)
                total_reward += reward

                done = is_terminal_func(next_state)

                # Store the transition in the replay buffer.
                self._remember(current_state, action, reward, next_state, done)

                # Learn from a replayed minibatch once enough data exists.
                if len(self.memory) > self.batch_size:
                    self._learn_from_memory()

                # Periodically sync the target Q-table (double-Q only).
                if self.use_double_q and step % self.update_target_freq == 0:
                    self.target_q_table = copy.deepcopy(self.q_table)

                current_state = next_state
                current_solution = next_solution

                # Track the best solution seen so far (reward is evaluated on
                # the full solution, so a per-step comparison is meaningful).
                if reward > best_reward:
                    best_solution = copy.deepcopy(next_solution)
                    best_reward = reward
                    self.logger.info(
                        f"第 {episode} 回合第 {step} 步发现更好的解，奖励: {best_reward}"
                    )

                step += 1

            # Decay epsilon, clamped to the configured floor.
            self.exploration_rate = max(
                self.min_exploration_rate,
                self.exploration_rate * self.exploration_decay,
            )

            # Progress log every 10 episodes.
            if episode % 10 == 0:
                self.logger.info(
                    f"第 {episode} 回合完成，总奖励: {total_reward}，最佳奖励: {best_reward}，探索率: {self.exploration_rate:.4f}"
                )

        end_time = datetime.now()
        optimization_time = (end_time - start_time).total_seconds()
        self.logger.info(
            f"增强版强化学习算法优化完成，耗时 {optimization_time:.2f} 秒，最终奖励: {best_reward}"
        )

        return best_solution

    def _get_state_key(self, state):
        """Convert a state into a hashable Q-table key.

        Lists, tuples and numpy arrays are not (reliably) hashable, so they
        are stringified; any other state is assumed hashable and used as-is.

        Args:
            state: State representation produced by ``get_state_func``.

        Returns:
            A hashable key for ``self.q_table``.
        """
        if isinstance(state, (list, tuple, np.ndarray)):
            return str(state)
        return state

    def _remember(self, state, action, reward, next_state, done):
        """Append one transition to the replay buffer.

        Args:
            state: State before the action.
            action: Action taken.
            reward: Reward observed after the action.
            next_state: Resulting state.
            done: True if ``next_state`` is terminal.
        """
        # New experiences get the current maximum priority (1.0 on an empty
        # buffer) so prioritized replay samples each of them at least once
        # before their TD error is known.
        priority = max(self.priorities, default=1.0)

        self.memory.append((state, action, reward, next_state, done))
        self.priorities.append(priority)

    def _learn_from_memory(self):
        """Sample a minibatch from the replay buffer and update the Q-table.

        Sampling is either uniform or proportional to stored priorities.
        Indices (not transitions) are sampled so that priorities can be
        written back in place; the previous ``memory.index(...)`` lookup was
        O(n), matched the wrong slot for duplicate transitions, and raised
        for numpy-array states.
        """
        if self.use_prioritized_replay:
            # Prioritized replay: sample indices with probability
            # proportional to each transition's stored priority.
            priorities = np.array(self.priorities, dtype=float)
            probabilities = priorities / np.sum(priorities)
            indices = np.random.choice(
                len(self.memory), self.batch_size, p=probabilities
            )
        else:
            # Uniform replay without replacement.
            indices = random.sample(range(len(self.memory)), self.batch_size)

        for idx in indices:
            state, action, reward, next_state, done = self.memory[idx]
            state_key = self._get_state_key(state)
            next_state_key = self._get_state_key(next_state)

            if state_key not in self.q_table:
                self.q_table[state_key] = {}

            # Compute the TD target.
            if done:
                target = reward
            else:
                next_q = self.q_table.setdefault(next_state_key, {})

                if self.use_double_q:
                    target_q = self.target_q_table.setdefault(next_state_key, {})
                    if next_q:
                        # Double Q: the online table picks the action, the
                        # target table scores it (reduces maximization bias).
                        best_action = max(next_q, key=next_q.get)
                        target = reward + self.discount_factor * target_q.get(
                            best_action, 0.0
                        )
                    else:
                        # No known actions for the next state yet; fall back
                        # to the immediate reward (the old code crashed here
                        # with max() on an empty dict).
                        target = reward
                else:
                    # Single Q: bootstrap from the best known next-state value.
                    target = (
                        reward + self.discount_factor * max(next_q.values())
                        if next_q
                        else reward
                    )

            if action not in self.q_table[state_key]:
                self.q_table[state_key][action] = 0.0

            # TD error drives both the value update and the new priority.
            td_error = target - self.q_table[state_key][action]

            self.q_table[state_key][action] += self.learning_rate * td_error

            if self.use_prioritized_replay:
                # |TD error| as priority; small constant keeps it nonzero.
                self.priorities[idx] = abs(td_error) + 0.01