"""
会议调度奖励管理器

集成到verl框架的奖励管理系统
"""

import torch
import numpy as np
from typing import Dict, List, Any
from collections import defaultdict
import json

import sys
import os
# Make the repository root importable (three directory levels up from this
# file) so that `verl` resolves when this module is executed directly.
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))

from verl.workers.reward_manager import register
from verl.protocol import DataProto


@register("meeting_scheduling_reward")
class MeetingSchedulingRewardManager:
    """
    Reward manager for the meeting-scheduling task.

    Computes per-sample rewards over a batch and plugs into the verl PPO
    training loop (registered under ``"meeting_scheduling_reward"``).
    """

    def __init__(self,
                 tokenizer,
                 num_examine: int = 8,
                 compute_score=None,
                 reward_fn_key: str = "data_source"):
        """
        Args:
            tokenizer: Tokenizer used to decode response token ids.
            num_examine: Number of samples to examine (for logging).
            compute_score: Optional custom reward function with the same
                signature as :meth:`_default_compute_score`; the default
                implementation is used when ``None``.
            reward_fn_key: Key in ``non_tensor_batch`` identifying each
                sample's data source.
        """
        self.tokenizer = tokenizer
        self.num_examine = num_examine
        self.reward_fn_key = reward_fn_key

        # Fall back to the built-in scoring function when none is supplied.
        self.compute_score = compute_score or self._default_compute_score

        # Relative weights of the individual reward components.
        self.weights = {
            'required_attendance': 100,
            'optional_attendance': 1,
            'historical_match': 5,
            'negotiation_cost': 2,
        }

    def _default_compute_score(self,
                              data_source: str,
                              solution_str: str,
                              ground_truth: Dict,
                              extra_info: Dict) -> Dict[str, float]:
        """
        Default reward computation.

        Args:
            data_source: Identifier of the data source.
            solution_str: The agent's solution (decoded text; unused by the
                default scoring, which relies on ``ground_truth`` only).
            ground_truth: Evaluation data / reference answer.
            extra_info: Additional per-sample information (unused here).

        Returns:
            Dict with ``'score'`` plus auxiliary metrics. On any failure a
            penalty score of ``-100.0`` is returned together with an
            ``'error'`` key (note: ``'error'`` is absent on success, so
            aggregated extra-info lists may have uneven lengths).
        """
        try:
            # Pull evaluation fields out of ground_truth.
            required_met = ground_truth.get('required_met', False)
            required_available = ground_truth.get('required_available', 0)
            required_total = ground_truth.get('required_total', 1)
            optional_available = ground_truth.get('optional_available', 0)
            optional_total = ground_truth.get('optional_total', 0)
            historical_match = ground_truth.get('historical_match', False)
            negotiations_count = ground_truth.get('negotiations_count', 0)

            reward = 0.0

            # 1. Required attendees: penalize each missing required person.
            if not required_met:
                missing_required = required_total - required_available
                reward -= missing_required * self.weights['required_attendance']

            # 2. Optional attendees: rewarded only when all required
            #    attendees are satisfied (rate * total == available count).
            if required_met and optional_total > 0:
                optional_rate = optional_available / optional_total
                reward += optional_rate * optional_total * self.weights['optional_attendance']

            # 3. Bonus for matching the historical/preferred slot.
            if required_met and historical_match:
                reward += self.weights['historical_match']

            # 4. Negotiation cost: each negotiation round is penalized.
            reward -= negotiations_count * self.weights['negotiation_cost']

            # Detailed result; max(1, ...) guards against division by zero.
            return {
                'score': float(reward),
                'required_met': required_met,
                'required_rate': required_available / max(1, required_total),
                'optional_rate': optional_available / max(1, optional_total),
                'historical_match': float(historical_match),
                'negotiations_count': negotiations_count
            }

        except Exception as e:
            # Malformed ground_truth (or similar) yields a fixed penalty.
            return {
                'score': -100.0,
                'error': str(e),
                'required_met': False,
                'required_rate': 0.0,
                'optional_rate': 0.0,
                'historical_match': 0.0,
                'negotiations_count': 0
            }

    def __call__(self, data: DataProto, return_dict: bool = False):
        """
        Compute rewards for a whole batch.

        Args:
            data: DataProto object holding the batch.
            return_dict: Whether to return a dict with extra metrics.

        Returns:
            ``reward_tensor`` of shape ``(batch_size, response_length)``
            with the scalar reward placed on each response's last valid
            token, or — when ``return_dict`` is True — a dict containing
            ``'rewards'`` plus the collected per-sample metrics.
        """
        # One reward slot per response token; only the last valid token of
        # each response receives the (sparse) scalar reward.
        reward_tensor = torch.zeros_like(
            data.batch['responses'],
            dtype=torch.float32
        )

        # Per-key lists of auxiliary metrics across the batch.
        reward_extra_info = defaultdict(list)

        for i in range(len(data)):
            data_item = data[i]

            prompt_ids = data_item.batch['prompts']
            response_ids = data_item.batch['responses']
            response_mask = data_item.batch.get('responses_mask', None)

            # Restrict to the valid (unmasked) part of the response, if a
            # mask is provided; otherwise treat the whole response as valid.
            if response_mask is not None:
                valid_response_mask = response_mask.bool()
                valid_response_ids = response_ids[valid_response_mask]
                valid_response_length = valid_response_mask.sum().item()
            else:
                valid_response_ids = response_ids
                valid_response_length = len(response_ids)

            # Best-effort decode; scoring falls back to an empty string on
            # tokenizer failure rather than aborting the batch.
            try:
                response_str = self.tokenizer.decode(
                    valid_response_ids,
                    skip_special_tokens=True
                )
            except Exception:
                response_str = ""

            # Ground truth for this sample (empty dict when absent).
            ground_truth = data_item.non_tensor_batch.get('reward_model', {}).get('ground_truth', {})

            # Data-source tag used to select/parametrize the reward fn.
            data_source = data_item.non_tensor_batch.get(self.reward_fn_key, 'unknown')

            extra_info = data_item.non_tensor_batch.get('extra_info', {})

            score_result = self.compute_score(
                data_source=data_source,
                solution_str=response_str,
                ground_truth=ground_truth,
                extra_info=extra_info
            )

            # Extract the scalar score; cast defensively since custom
            # compute_score implementations may return numpy scalars.
            if isinstance(score_result, dict):
                reward = float(score_result['score'])
                # Collect every metric for optional downstream logging.
                for key, value in score_result.items():
                    reward_extra_info[key].append(value)
            else:
                reward = float(score_result)
                reward_extra_info['score'].append(reward)

            # Sparse reward on the final valid response token (dropped
            # silently when the response is empty).
            if valid_response_length > 0:
                reward_tensor[i, valid_response_length - 1] = reward

        if return_dict:
            return {
                'rewards': reward_tensor,
                **reward_extra_info
            }
        else:
            return reward_tensor


def test_reward_manager():
    """Smoke-test the reward manager on three hand-crafted scenarios.

    Previously this function only printed results; it now also asserts the
    exact expected rewards derived from the default weights
    (required=100, optional=1, historical=5, negotiation=2).
    """
    print("=== 测试奖励管理器 ===\n")

    # Mock tokenizer: decoding always yields a fixed plausible response.
    class MockTokenizer:
        def decode(self, ids, skip_special_tokens=False):
            return "Agent response: select_time Monday afternoon"

    tokenizer = MockTokenizer()

    reward_manager = MeetingSchedulingRewardManager(
        tokenizer=tokenizer,
        num_examine=2
    )

    # Minimal stand-in for a single DataProto item.
    class MockDataItem:
        def __init__(self, reward_info):
            self.batch = {
                'prompts': torch.tensor([1, 2, 3]),
                'responses': torch.tensor([4, 5, 6, 7, 8]),
                'responses_mask': torch.tensor([1, 1, 1, 1, 1])
            }
            self.non_tensor_batch = {
                'reward_model': {
                    'ground_truth': reward_info
                },
                'data_source': 'test',
                'extra_info': {}
            }

    # Minimal stand-in for DataProto: indexable, sized, with a batch dict.
    class MockDataProto:
        def __init__(self, items):
            self.items = items
            self.batch = {
                'responses': torch.zeros((len(items), 5), dtype=torch.long)
            }

        def __len__(self):
            return len(self.items)

        def __getitem__(self, idx):
            return self.items[idx]

    # Scenario 1: ideal case.
    # Expected: +3 (optional 3/3) + 5 (historical) - 0 = 8.0
    print("场景1: 理想情况")
    item1 = MockDataItem({
        'required_met': True,
        'required_available': 3,
        'required_total': 3,
        'optional_available': 3,
        'optional_total': 3,
        'historical_match': True,
        'negotiations_count': 0
    })

    data1 = MockDataProto([item1])
    result1 = reward_manager(data1, return_dict=True)

    print(f"奖励: {result1['rewards'][0, -1].item():.2f}")
    print(f"必选满足: {result1['required_met'][0]}")
    print(f"可选参与率: {result1['optional_rate'][0]:.2f}")
    print(f"历史匹配: {result1['historical_match'][0]}")
    assert abs(result1['rewards'][0, -1].item() - 8.0) < 1e-6
    assert result1['required_met'][0] is True

    # Scenario 2: one negotiation needed.
    # Expected: +2 (optional 2/3) + 5 (historical) - 2 (1 negotiation) = 5.0
    print("\n场景2: 需要协商")
    item2 = MockDataItem({
        'required_met': True,
        'required_available': 3,
        'required_total': 3,
        'optional_available': 2,
        'optional_total': 3,
        'historical_match': True,
        'negotiations_count': 1
    })

    data2 = MockDataProto([item2])
    result2 = reward_manager(data2, return_dict=True)

    print(f"奖励: {result2['rewards'][0, -1].item():.2f}")
    print(f"协商次数: {result2['negotiations_count'][0]}")
    assert abs(result2['rewards'][0, -1].item() - 5.0) < 1e-6
    assert result2['negotiations_count'][0] == 1

    # Scenario 3: failure (one required attendee missing).
    # Expected: -(3-2) * 100 = -100.0 (no optional/historical bonus).
    print("\n场景3: 失败情况（缺少必选人员）")
    item3 = MockDataItem({
        'required_met': False,
        'required_available': 2,
        'required_total': 3,
        'optional_available': 3,
        'optional_total': 3,
        'historical_match': True,
        'negotiations_count': 0
    })

    data3 = MockDataProto([item3])
    result3 = reward_manager(data3, return_dict=True)

    print(f"奖励: {result3['rewards'][0, -1].item():.2f}")
    print(f"必选满足: {result3['required_met'][0]}")
    assert abs(result3['rewards'][0, -1].item() - (-100.0)) < 1e-6
    assert result3['required_met'][0] is False

    print("\n测试完成")


# Allow running this module directly as a standalone smoke test.
if __name__ == '__main__':
    test_reward_manager()
