"""
会议调度奖励计算器

支持多维度奖励评估：
- 决策质量（必选人员参与、可选人员参与）
- 历史偏好遵循度
- 协商效率
- 步骤序列评分
"""

from typing import Any, Dict, List, Optional, Tuple


class MeetingReward:
    """Reward calculator for meeting-scheduling episodes.

    Scores a scheduling outcome along several dimensions:
    - decision quality (required / optional attendee participation)
    - adherence to historical scheduling patterns
    - negotiation efficiency
    - step-sequence efficiency
    """

    # Reward weight configuration.
    DEFAULT_WEIGHTS = {
        'required_attendance': 100,  # penalty per missing required attendee
        'optional_attendance': 1,     # reward per attending optional attendee
        'historical_match': 5,        # bonus for matching a historical pattern
        'negotiation_cost': 2,        # penalty per negotiation round
        'invalid_action': 1,          # penalty per invalid action
        'efficiency_bonus': 3,        # bonus for an efficient decision
    }

    def __init__(self, weights: Optional[Dict[str, float]] = None):
        """
        Args:
            weights: Optional custom weight overrides. Keys not supplied
                fall back to DEFAULT_WEIGHTS, so a partial dict is safe.
        """
        # Merge overrides on top of the defaults. Storing the caller's dict
        # as-is (the previous behavior) raised KeyError inside
        # calculate_reward() whenever a partial override omitted a weight,
        # and let callers mutate the shared class constant.
        self.weights = {**self.DEFAULT_WEIGHTS, **(weights or {})}

    def calculate_reward(self,
                        required_available: int,
                        required_total: int,
                        optional_available: int,
                        optional_total: int,
                        historical_match: bool,
                        negotiations_count: int,
                        steps_count: int = 0,
                        max_steps: int = 20) -> Dict[str, float]:
        """
        Compute the composite reward for one scheduling outcome.

        Args:
            required_available: Number of required attendees who can attend.
            required_total: Total number of required attendees.
            optional_available: Number of optional attendees who can attend.
            optional_total: Total number of optional attendees.
            historical_match: Whether the slot matches a historical pattern.
            negotiations_count: Number of negotiation rounds used.
            steps_count: Number of steps taken (0 means "not tracked").
            max_steps: Step budget used to normalize efficiency.

        Returns:
            Dict with per-dimension scores plus a 'total_reward' entry.
        """
        scores: Dict[str, float] = {}

        # 1. Required attendance (highest priority): penalize each absentee.
        required_met = (required_available == required_total)
        if required_met:
            scores['required_penalty'] = 0.0
        else:
            missing_required = required_total - required_available
            scores['required_penalty'] = -missing_required * self.weights['required_attendance']

        # 2. Optional attendance: only rewarded when all required attendees fit.
        if required_met and optional_total > 0:
            optional_rate = optional_available / optional_total
            scores['optional_reward'] = optional_rate * optional_total * self.weights['optional_attendance']
        else:
            scores['optional_reward'] = 0.0

        # 3. Historical-pattern match: only rewarded on a successful schedule.
        if required_met and historical_match:
            scores['historical_reward'] = self.weights['historical_match']
        else:
            scores['historical_reward'] = 0.0

        # 4. Negotiation cost: flat penalty per negotiation round.
        scores['negotiation_penalty'] = -negotiations_count * self.weights['negotiation_cost']

        # 5. Efficiency bonus: success while using less than 30% of the step
        #    budget. Guard max_steps > 0 to avoid ZeroDivisionError when a
        #    caller passes a zero budget.
        scores['efficiency_bonus'] = 0.0
        if required_met and steps_count > 0 and max_steps > 0:
            efficiency_rate = 1.0 - (steps_count / max_steps)
            if efficiency_rate > 0.7:
                scores['efficiency_bonus'] = self.weights['efficiency_bonus']

        # Total reward is the plain sum of all dimension scores.
        scores['total_reward'] = sum(scores.values())

        return scores

    def evaluate_strategy(self,
                         episode_history: List[Dict],
                         final_result: Dict) -> Dict[str, Any]:
        """
        Evaluate a full decision strategy (one episode).

        Args:
            episode_history: Step records, each shaped like::

                {
                    'step': int,
                    'action_type': str,
                    'parameters': dict,
                    'observation': str,
                    'reward': float
                }

            final_result: Final outcome, shaped like::

                {
                    'required_met': bool,
                    'required_available': int,
                    'required_total': int,
                    'optional_available': int,
                    'optional_total': int,
                    'historical_match': bool,
                    'negotiations_count': int
                }

        Returns:
            Evaluation dict with reward breakdown and strategy analysis.
        """
        evaluation: Dict[str, Any] = {
            'total_steps': len(episode_history),
            'success': final_result.get('required_met', False),
            'action_sequence': [step['action_type'] for step in episode_history],
            'reward_breakdown': {},
            'strategy_analysis': {}
        }

        # Final composite reward for the episode.
        scores = self.calculate_reward(
            required_available=final_result.get('required_available', 0),
            required_total=final_result.get('required_total', 1),
            optional_available=final_result.get('optional_available', 0),
            optional_total=final_result.get('optional_total', 0),
            historical_match=final_result.get('historical_match', False),
            negotiations_count=final_result.get('negotiations_count', 0),
            steps_count=len(episode_history)
        )
        evaluation['reward_breakdown'] = scores

        analysis = evaluation['strategy_analysis']

        # 1. Information-gathering sufficiency.
        query_calendar_count = sum(1 for s in episode_history if s['action_type'] == 'query_calendar')
        query_history_count = sum(1 for s in episode_history if s['action_type'] == 'query_history')

        analysis['calendar_queries'] = query_calendar_count
        analysis['history_queries'] = query_history_count
        analysis['information_gathering'] = (
            'sufficient' if (query_calendar_count >= 1 and query_history_count >= 1) else 'insufficient'
        )

        # 2. Negotiation strategy.
        negotiation_count = sum(1 for s in episode_history if s['action_type'] == 'negotiate_time')
        analysis['negotiations_used'] = negotiation_count
        analysis['negotiation_efficiency'] = (
            'efficient' if negotiation_count <= 2 else 'excessive'
        )

        # 3. Decision speed, bucketed by episode length.
        analysis['decision_speed'] = (
            'fast' if len(episode_history) <= 5 else
            'moderate' if len(episode_history) <= 10 else
            'slow'
        )

        # 4. Optimality: compare the actual reward against the theoretical
        #    best reward achievable under ideal conditions.
        theoretical_best = self._calculate_theoretical_best(final_result)
        actual_reward = scores['total_reward']
        optimality_gap = theoretical_best - actual_reward

        analysis['theoretical_best_reward'] = theoretical_best
        analysis['actual_reward'] = actual_reward
        analysis['optimality_gap'] = optimality_gap
        analysis['optimality_rate'] = (
            actual_reward / theoretical_best if theoretical_best > 0 else 0.0
        )

        return evaluation

    def _calculate_theoretical_best(self, final_result: Dict) -> float:
        """Compute the theoretical best reward for the given attendee mix.

        Assumes: all required attendees attend (no penalty), all optional
        attendees attend, the slot matches the historical pattern, no
        negotiation is needed, and the decision finishes within the
        efficiency-bonus step budget (~3-4 steps).
        """
        optional_total = final_result.get('optional_total', 0)

        best_reward = 0.0

        # All required attendees attend -> no penalty (contributes 0).

        # All optional attendees attend.
        best_reward += optional_total * self.weights['optional_attendance']

        # Slot matches the historical pattern.
        best_reward += self.weights['historical_match']

        # No negotiation needed (contributes 0).

        # Efficient decision (finished within the step budget).
        best_reward += self.weights['efficiency_bonus']

        return best_reward

    def compare_strategies(self,
                          strategy_a: Dict,
                          strategy_b: Dict) -> str:
        """
        Compare two strategies.

        Args:
            strategy_a, strategy_b: Return values of evaluate_strategy().

        Returns:
            Human-readable comparison text.
        """
        comparison = []

        comparison.append("=== 策略对比 ===\n")

        # Success / failure of each strategy.
        comparison.append(f"成功率:")
        comparison.append(f"  策略A: {'成功' if strategy_a['success'] else '失败'}")
        comparison.append(f"  策略B: {'成功' if strategy_b['success'] else '失败'}\n")

        # Total-reward comparison.
        reward_a = strategy_a['reward_breakdown']['total_reward']
        reward_b = strategy_b['reward_breakdown']['total_reward']
        comparison.append(f"总奖励:")
        comparison.append(f"  策略A: {reward_a:.2f}")
        comparison.append(f"  策略B: {reward_b:.2f}")
        comparison.append(f"  差距: {abs(reward_a - reward_b):.2f}\n")

        # Decision-efficiency comparison.
        comparison.append(f"决策效率:")
        comparison.append(f"  策略A: {strategy_a['total_steps']}步, "
                        f"{strategy_a['strategy_analysis']['decision_speed']}")
        comparison.append(f"  策略B: {strategy_b['total_steps']}步, "
                        f"{strategy_b['strategy_analysis']['decision_speed']}\n")

        # Negotiation-count comparison.
        comparison.append(f"协商次数:")
        comparison.append(f"  策略A: {strategy_a['strategy_analysis']['negotiations_used']}次")
        comparison.append(f"  策略B: {strategy_b['strategy_analysis']['negotiations_used']}次\n")

        # Optimality comparison.
        opt_a = strategy_a['strategy_analysis']['optimality_rate']
        opt_b = strategy_b['strategy_analysis']['optimality_rate']
        comparison.append(f"最优性:")
        comparison.append(f"  策略A: {opt_a:.2%}")
        comparison.append(f"  策略B: {opt_b:.2%}\n")

        # Verdict: higher total reward wins.
        if reward_a > reward_b:
            winner = "策略A"
        elif reward_b > reward_a:
            winner = "策略B"
        else:
            winner = "两者相同"

        comparison.append(f"结论: {winner}表现更优\n")

        return '\n'.join(comparison)


def test_reward_calculator():
    """Demo driver: print reward breakdowns for three scenarios, then a strategy evaluation."""
    print("=== 测试奖励计算器 ===\n")

    calculator = MeetingReward()

    # (title, description bullets, calculate_reward keyword arguments)
    scenarios = [
        (
            "场景A: 理想情况",
            ["- 必选人员: 3/3", "- 可选人员: 3/3", "- 历史匹配: 是", "- 协商次数: 0"],
            dict(required_available=3, required_total=3,
                 optional_available=3, optional_total=3,
                 historical_match=True, negotiations_count=0,
                 steps_count=4, max_steps=20),
        ),
        (
            "场景B: 需要协商",
            ["- 必选人员: 3/3", "- 可选人员: 2/3", "- 历史匹配: 是", "- 协商次数: 1"],
            dict(required_available=3, required_total=3,
                 optional_available=2, optional_total=3,
                 historical_match=True, negotiations_count=1,
                 steps_count=6, max_steps=20),
        ),
        (
            "场景C: 失败案例（缺少必选人员）",
            ["- 必选人员: 2/3", "- 可选人员: 3/3", "- 历史匹配: 是", "- 协商次数: 0"],
            dict(required_available=2, required_total=3,
                 optional_available=3, optional_total=3,
                 historical_match=True, negotiations_count=0,
                 steps_count=4, max_steps=20),
        ),
    ]

    # Run each scenario through the calculator and show its breakdown.
    for title, bullets, kwargs in scenarios:
        print(title)
        for bullet in bullets:
            print(bullet)
        scores = calculator.calculate_reward(**kwargs)
        print(f"奖励明细: {scores}")
        print(f"总奖励: {scores['total_reward']:.1f}\n")

    # Strategy-evaluation demo on a short, successful episode.
    print("\n=== 测试策略评估 ===\n")

    episode_actions = [
        ('query_calendar', 0),
        ('query_history', 0),
        ('negotiate_time', 0),
        ('select_time', 6),
    ]
    episode_history = [
        {'step': idx + 1, 'action_type': action, 'parameters': {},
         'observation': '', 'reward': reward}
        for idx, (action, reward) in enumerate(episode_actions)
    ]

    final_result = {
        'required_met': True,
        'required_available': 3,
        'required_total': 3,
        'optional_available': 2,
        'optional_total': 3,
        'historical_match': True,
        'negotiations_count': 1,
    }

    evaluation = calculator.evaluate_strategy(episode_history, final_result)

    print(f"总步数: {evaluation['total_steps']}")
    print(f"成功: {evaluation['success']}")
    print(f"动作序列: {evaluation['action_sequence']}")
    print(f"奖励明细: {evaluation['reward_breakdown']}")
    print("策略分析:")
    for key, value in evaluation['strategy_analysis'].items():
        print(f"  {key}: {value}")

# Script entry point: run the demo when executed directly.
if __name__ == '__main__':
    test_reward_calculator()
