from typing import List, Tuple, Dict, Any
from rainforeLearn.gomoku.v2.configs.config import GomokuDQNConfig
from rainforeLearn.gomoku.v2.train.constants.train_constants import TrainConstants


class RewardCalculator:
    """Computes rewards and penalties used during Gomoku DQN training.

    Responsibilities:
      * per-batch win-rate bonuses,
      * batch win-rate-decline penalties, with a penalty coefficient that is
        dynamically tuned based on how effective past penalties were,
      * intermediate (per-move) shaping rewards based on attack lines and
        board position.
    """

    def __init__(self, config: GomokuDQNConfig):
        """Initialize the calculator from the training configuration.

        Args:
            config: training configuration; must expose ``board_size`` and
                ``batch_penalty_coefficient`` (plus the feature flags and
                tuning parameters read by the individual methods).
        """
        self.config = config
        self.board_size = config.board_size

        # Penalty-coefficient history as (episode, coefficient) pairs,
        # seeded with the initial coefficient at episode 0.
        self.coefficient_history: List[Tuple[int, float]] = [
            (0, config.batch_penalty_coefficient)
        ]

        # Attack reward keyed by the longest line a move creates.
        self.attack_rewards = {
            4: 3.0,  # four in a row
            3: 1.0,  # three in a row
            2: 0.2,  # two in a row
        }

    def calculate_win_rate_bonus(self, win_rate: float) -> float:
        """Return the bonus for the highest win-rate threshold reached.

        Thresholds are scanned in ascending order; the bonus of the largest
        threshold that ``win_rate`` meets wins. Returns 0.0 when the feature
        is disabled or no threshold is met.
        """
        if not self.config.enable_win_rate_rewards:
            return 0.0

        bonus = 0.0
        # Ascending scan: keep upgrading the bonus while thresholds are met.
        for threshold, reward in sorted(self.config.win_rate_rewards_in_batch.items()):
            if win_rate < threshold:
                break
            bonus = reward

        return bonus

    def calculate_batch_penalty(self, current_rate: float, previous_rate: float) -> float:
        """Return the penalty for a drop in batch win rate.

        No penalty is applied when the feature is disabled, when the rate did
        not decline, or when the decline is below the configured minimum
        threshold. The penalty scales linearly with the decline and is capped
        at ``batch_max_penalty``.
        """
        if not self.config.enable_batch_penalty:
            return 0.0

        if current_rate >= previous_rate:
            return 0.0

        decline = previous_rate - current_rate
        if decline < self.config.batch_min_penalty_threshold:
            return 0.0

        # Decline is a fraction; the *100 factor converts it to percentage
        # points before scaling by the coefficient.
        return min(
            decline * self.config.batch_penalty_coefficient * 100,
            self.config.batch_max_penalty
        )

    def calculate_penalty_effectiveness(self, batch_win_rates: List[float],
                                        penalty_history: List[Tuple[int, float]]) -> float:
        """Estimate how often past penalties were followed by improvement.

        For each recent penalty record, compares the win rate of the batch
        before the penalty with the batch after it; a penalty scores 1.0 if
        the win rate improved, else 0.0. Returns the mean score in [0.0, 1.0]
        (e.g. 1.0 = every penalty helped, 0.5 = half did, 0.0 = none did),
        or 0.0 when there is insufficient data.
        """
        if len(batch_win_rates) < 3 or not penalty_history:
            return 0.0

        effectiveness_scores = []
        recent_penalties = penalty_history[-TrainConstants.MAX_RECENT_COMPARISONS:]

        for episode, penalty in recent_penalties:
            penalty_batch_idx = self._get_penalty_batch_index(episode, penalty_history)

            # Only score penalties whose index has both a preceding and a
            # following batch to compare against.
            if self._is_valid_batch_index(penalty_batch_idx, batch_win_rates):
                before_rate = batch_win_rates[penalty_batch_idx - 1]
                after_rate = batch_win_rates[penalty_batch_idx + 1]
                effectiveness_scores.append(1.0 if after_rate > before_rate else 0.0)

        return sum(effectiveness_scores) / len(effectiveness_scores) if effectiveness_scores else 0.0

    def _get_penalty_batch_index(self, episode: int, penalty_history: List[Tuple[int, float]]) -> int:
        """Return the index in ``penalty_history`` of the record for ``episode``, or -1.

        NOTE(review): callers use this position directly as an index into
        ``batch_win_rates``, which assumes at most one penalty record per
        batch and aligned ordering — TODO confirm against the trainer.
        """
        return next((i for i, (ep, _) in enumerate(penalty_history) if ep == episode), -1)

    def _is_valid_batch_index(self, batch_idx: int, batch_win_rates: List[float]) -> bool:
        """True when ``batch_idx`` has both a previous and a next batch entry."""
        return 1 <= batch_idx < len(batch_win_rates) - 1

    def calculate_intermediate_reward(self, board, row: int, col: int, player: int,
                                      move_count: int, threat_penalty: float) -> float:
        """Compute the per-move shaping reward for placing at (row, col).

        Combines a small constant step cost, an attack reward for the line the
        move creates, a center-proximity reward, a long-game penalty after 50
        moves, and subtracts the caller-supplied ``threat_penalty``.
        """
        base_reward = -0.01  # constant per-step cost to encourage shorter games

        attack_reward = self._calculate_attack_reward(board, row, col, player)
        position_reward = self._calculate_position_reward(row, col)

        # Discourage games dragging on past 50 moves.
        long_game_penalty = -0.005 if move_count > 50 else 0

        return (base_reward + attack_reward + position_reward +
                long_game_penalty - threat_penalty)

    def _calculate_attack_reward(self, board, r: int, c: int, player: int) -> float:
        """Reward based on the longest line through (r, c); 0.0 if below 2."""
        max_line = self._get_max_line_length(board, r, c, player)
        return self.attack_rewards.get(max_line, 0.0)

    def _calculate_position_reward(self, row: int, col: int) -> float:
        """Reward proximity to the board center (Manhattan distance)."""
        center = self.board_size // 2
        distance_from_center = abs(row - center) + abs(col - center)
        return max(0, (center - distance_from_center) * 0.01)

    def _get_max_line_length(self, board, r: int, c: int, player: int) -> int:
        """Return the longest line through (r, c) over all four directions."""
        directions = [(0, 1), (1, 0), (1, 1), (1, -1)]
        return max(
            self._count_line(board, r, c, dr, dc, player)
            for dr, dc in directions
        )

    def _count_line(self, board, r: int, c: int, dr: int, dc: int, player: int) -> int:
        """Count consecutive stones of ``player`` through (r, c) along (dr, dc).

        Scans up to four cells in each of the two opposite directions and
        includes the stone at (r, c) itself.
        """
        count = 1  # the stone at (r, c)
        for sign in (1, -1):  # forward, then backward along the direction
            for i in range(1, 5):
                nr, nc = r + dr * i * sign, c + dc * i * sign
                if not (0 <= nr < self.board_size and 0 <= nc < self.board_size):
                    break
                if board[nr, nc] != player:
                    break
                count += 1
        return count

    def record_coefficient_for_batch(self, episode: int) -> None:
        """Record the current penalty coefficient for ``episode`` (idempotent)."""
        current_coefficient = self.config.batch_penalty_coefficient
        # Skip if this episode's coefficient was already recorded.
        if not self.coefficient_history or self.coefficient_history[-1][0] != episode:
            self.coefficient_history.append((episode, current_coefficient))

    def adjust_penalty_coefficient(self, effectiveness: float, episode: int) -> None:
        """Tune the penalty coefficient based on the effectiveness estimate.

        Below-target effectiveness softens the penalty (smaller coefficient);
        above-target tightens it. The result is clamped to the configured
        [min_coefficient, max_coefficient] range, written back to the config,
        and recorded in the coefficient history.
        """
        if not self.config.enable_batch_penalty:
            return

        current_coefficient = self.config.batch_penalty_coefficient
        target_effectiveness = self.config.target_effectiveness
        min_coefficient = self.config.min_coefficient
        max_coefficient = self.config.max_coefficient
        adjustment_rate = self.config.adjustment_rate

        if effectiveness < target_effectiveness:
            # Penalties are not helping: soften the coefficient proportionally
            # to how far effectiveness falls short of the target.
            adjustment_factor = 1 - adjustment_rate * (target_effectiveness - effectiveness) / target_effectiveness
        else:
            # Penalties are working: tighten proportionally to the surplus
            # above target, normalized by the remaining headroom.
            adjustment_factor = 1 + adjustment_rate * (effectiveness - target_effectiveness) / (
                        1 - target_effectiveness)
        new_coefficient = current_coefficient * adjustment_factor

        # Clamp to the configured range.
        new_coefficient = max(min_coefficient, min(new_coefficient, max_coefficient))

        self.config.batch_penalty_coefficient = new_coefficient

        # Update-or-append the history entry for this episode.
        if self.coefficient_history and self.coefficient_history[-1][0] == episode:
            self.coefficient_history[-1] = (episode, new_coefficient)
        else:
            self.coefficient_history.append((episode, new_coefficient))

        print(f"🔧 \n惩罚系数调整: {current_coefficient:.3f} -> {new_coefficient:.3f} (效果评估: {effectiveness:.3f})")

    def get_coefficient_adjustment_summary(self) -> Dict[str, Any]:
        """Return the coefficient history, current value, and entry count."""
        return {
            'coefficient_history': self.coefficient_history,
            'current_coefficient': self.config.batch_penalty_coefficient,
            'total_adjustments': len(self.coefficient_history)
        }
