# my_medical_reward_manager.py
import re
from collections import defaultdict

import torch

from verl import DataProto
from verl.workers.reward_manager import register

@register("medmatch")  # registered under the name "medmatch"
class MedMatchRewardManager:
    """Reward manager that scores medical-diagnosis responses.

    For each sample it decodes the response, extracts the diagnosis inside
    the ``<answer>...</answer>`` tag and compares it to the ground-truth
    label: exact match scores 1.0, a hit on the minimal (prefix-stripped)
    label scores 0.2, anything else -1.0. An optional linear overlong
    penalty is subtracted for responses exceeding the length budget.
    """

    # Boilerplate prefix the ground-truth label may carry
    # ("In summary, this image is diagnosed as"); stripped to obtain the
    # minimal label used for the partial-credit substring match.
    _LABEL_PREFIX = "综上，本图片诊断为"

    def __init__(
        self,
        tokenizer,
        num_examine,
        compute_score=None,
        reward_fn_key="data_source",
        max_resp_len=None,
        overlong_buffer_cfg=None,
    ):
        """
        Args:
            tokenizer: tokenizer used to decode prompt/response token ids.
            num_examine: number of samples per data source to print for
                manual inspection.
            compute_score: optional scoring callable with the same signature
                as ``default_compute_score``; the default is used when None.
                (Fixes a bug where this argument was silently ignored.)
            reward_fn_key: key in ``non_tensor_batch`` naming the data source.
            max_resp_len: response length budget; must be set when the
                overlong penalty is enabled.
            overlong_buffer_cfg: config object with ``enable``, ``len``,
                ``penalty_factor`` and optional ``log`` attributes.
        """
        self.tokenizer = tokenizer
        self.num_examine = num_examine
        self.reward_fn_key = reward_fn_key
        self.max_resp_len = max_resp_len
        self.compute_score = compute_score or self.default_compute_score
        self.overlong_buffer_cfg = overlong_buffer_cfg
        if overlong_buffer_cfg is not None and getattr(overlong_buffer_cfg, "enable", False):
            # Fail fast here rather than with an obscure TypeError in __call__.
            assert self.max_resp_len is not None, (
                "max_resp_len must be provided when overlong_buffer_cfg.enable is True"
            )

    def extract_answer(self, response_str):
        """Return the diagnosis inside <answer>...</answer>, or '' if absent."""
        match = re.search(r"<answer>(.*?)</answer>", response_str, re.DOTALL)
        return match.group(1).strip() if match else ""

    def default_compute_score(self, data_source, solution_str, ground_truth, extra_info=None):
        """Score one response string against the ground-truth label.

        Returns:
            dict with keys:
                score: 1.0 (exact match), 0.2 (minimal-label substring hit)
                    or -1.0 (miss).
                acc: 1 if score > 0 else 0.
                pred: the extracted (punctuation-stripped) prediction.
                mini_label: the prefix-stripped label, logged instead of the
                    full label.
        """
        pred = self.extract_answer(solution_str)
        pred = pred.rstrip("。.")  # drop trailing full stops before comparing
        label = ground_truth
        # Minimal label: strip the boilerplate prefix so a bare diagnosis
        # still earns partial credit. (Replaces a fragile hard-coded slice.)
        if label.startswith(self._LABEL_PREFIX):
            min_label = label[len(self._LABEL_PREFIX):]
        else:
            min_label = label

        if pred == label:
            # 1. Fully identical to the complete label: full reward.
            score = 1.0
        elif min_label in pred:
            # 2. Minimal label appears somewhere in the prediction: small reward.
            score = 0.2
        else:
            score = -1.0

        return {"score": score, "acc": int(score > 0), "pred": pred, "mini_label": min_label}

    def __call__(self, data: DataProto, return_dict: bool = False):
        """Main entry point, invoked by the verl training loop.

        For every sample in the batch: decode prompt/response, score the
        response, optionally apply the overlong penalty, log a few samples,
        and place the scalar reward on the last valid response token.

        Args:
            data: batch with ``prompts``, ``responses``, ``attention_mask``
                tensors and ``reward_model`` / data-source metadata.
            return_dict: when True, also return per-sample scoring details.

        Returns:
            A float32 reward tensor shaped like ``responses``, or a dict
            with ``reward_tensor`` and ``reward_extra_info`` when
            ``return_dict`` is True.
        """
        # Reward tensor has the same shape as responses (float32, all zeros).
        reward_tensor = torch.zeros_like(data.batch["responses"], dtype=torch.float32)
        reward_extra_info = defaultdict(list)  # per-sample score/acc/pred/label for logging
        already_print_data_sources = {}  # caps how many samples per data source get printed

        for i in range(len(data)):
            data_item = data[i]  # i-th sample

            # Decode the prompt (left-padded: valid tokens sit at the end).
            prompt_ids = data_item.batch["prompts"]
            prompt_length = prompt_ids.shape[-1]
            valid_prompt_length = data_item.batch["attention_mask"][:prompt_length].sum()
            valid_prompt_ids = prompt_ids[-valid_prompt_length:]

            # Decode the response (right-padded: valid tokens sit at the front).
            response_ids = data_item.batch["responses"]
            valid_response_length = data_item.batch["attention_mask"][prompt_length:].sum()
            valid_response_ids = response_ids[:valid_response_length]

            # Token ids -> text.
            prompt_str = self.tokenizer.decode(valid_prompt_ids, skip_special_tokens=True)
            response_str = self.tokenizer.decode(valid_response_ids, skip_special_tokens=True)

            # Strip a trailing eos token, if any survived decoding.
            eos_token = self.tokenizer.eos_token
            if response_str.endswith(eos_token):
                response_str = response_str[: -len(eos_token)]

            # Ground-truth label.
            ground_truth = data_item.non_tensor_batch["reward_model"]["ground_truth"]

            # Other metadata.
            data_source = data_item.non_tensor_batch.get(self.reward_fn_key, None)
            extra_info = data_item.non_tensor_batch.get("extra_info", None)

            # Compute the score; returns a dict with score/acc/pred/mini_label.
            result = self.compute_score(
                data_source=data_source,
                solution_str=response_str,
                ground_truth=ground_truth,
                extra_info=extra_info,
            )

            score = result["score"]
            # Collect every reported metric for logging/analysis.
            for key, value in result.items():
                reward_extra_info[key].append(value)

            reward = score  # base reward

            # Overlong penalty: linearly decrease the reward for tokens past
            # the expected length, capped at zero (never a bonus).
            if self.overlong_buffer_cfg and getattr(self.overlong_buffer_cfg, "enable", False):
                overlong_buffer_len = self.overlong_buffer_cfg.len
                expected_len = self.max_resp_len - overlong_buffer_len
                exceed_len = valid_response_length - expected_len
                overlong_penalty_factor = self.overlong_buffer_cfg.penalty_factor
                overlong_reward = min(-exceed_len / overlong_buffer_len * overlong_penalty_factor, 0)
                reward += overlong_reward
                if getattr(self.overlong_buffer_cfg, "log", False):
                    reward_extra_info["overlong_reward"].append(overlong_reward)
                    reward_extra_info["overlong"].append(overlong_reward < 0)

            # Assign the reward only to the last valid token (usually eos).
            reward_tensor[i, valid_response_length - 1] = reward

            # Print a few samples per data source for manual inspection.
            if data_source not in already_print_data_sources:
                already_print_data_sources[data_source] = 0
            if already_print_data_sources[data_source] < self.num_examine:
                already_print_data_sources[data_source] += 1
                print("[prompt]", prompt_str)
                print("[response]", response_str)
                print("[ground_truth]", ground_truth)
                for key, value in result.items():
                    print(f"[{key}]", value)

        # Optionally return the detailed per-sample scoring info as well.
        if return_dict:
            return {
                "reward_tensor": reward_tensor,
                "reward_extra_info": reward_extra_info,
            }
        else:
            return reward_tensor