# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import re
import numpy as np
import torch
from pprint import pprint
from typing import List, Dict, Tuple
from transformers import PreTrainedTokenizerFast
from .reject_sample import _mask_dict_of_lists, _create_1d_object_array # Assuming shared utils
from verl.protocol import DataProto
from tensordict import TensorDict
from .utils import filter_by_mask, decode_prompt_response_str
import logging

# --- Per-sample tool-usage parsing helper ---
def get_tool_list(extra_info_dict: dict, batch_size: int) -> List[Dict[str, Dict[str, int]]]:
    """Collect per-sample tool usage stats.

    Merges the ``successful_usage_times`` / ``failed_usage_times`` lists from
    ``extra_info_dict`` into one dict per sample, mapping each tool name to
    ``{"success": <count>, "fail": <count>}``. Lists that are missing, shorter
    than ``batch_size``, or contain ``None`` entries are treated as empty.
    """
    success_per_sample = extra_info_dict.get("successful_usage_times", [{} for _ in range(batch_size)])
    failure_per_sample = extra_info_dict.get("failed_usage_times", [{} for _ in range(batch_size)])

    def _entry(seq, idx):
        # Guard against lists shorter than batch_size and None placeholders.
        return (seq[idx] if idx < len(seq) else {}) or {}

    result: List[Dict[str, Dict[str, int]]] = []
    for idx in range(batch_size):
        ok, bad = _entry(success_per_sample, idx), _entry(failure_per_sample, idx)
        names = set(ok) | set(bad)
        result.append({name: {"success": ok.get(name, 0), "fail": bad.get(name, 0)} for name in names})
    return result

# --- Penalty/bonus calculators: each returns (per-sample weight array, metrics dict) ---
def calc_error_ratio_penalty_weights(response_text: List[str]) -> Tuple[np.ndarray, Dict]:
    """Penalize rollouts by the fraction of erroring tool responses.

    Every ``<tool_response>...</tool_response>`` span containing the word
    "error" (case-insensitive) counts as a failure; the per-sample penalty is
    failures / responses, or 0.5 for samples with no tool responses at all.
    Returns the per-sample penalty array plus global-ratio / mean metrics.
    """
    span_re = re.compile(r'<tool_response>.*?</tool_response>', re.DOTALL)
    weights: List[float] = []
    err_sum = 0
    span_sum = 0
    for text in response_text:
        spans = span_re.findall(text)
        errs = sum(1 for span in spans if 'error' in span.lower())
        # No tool responses at all -> neutral 0.5 penalty.
        weights.append(errs / len(spans) if spans else 0.5)
        err_sum += errs
        span_sum += len(spans)
    metrics = {
        'roc_error_ratio/global_err_ratio': err_sum / span_sum if span_sum > 0 else 0,
        'roc_error_ratio/penalty_weight_mean': np.mean(weights) if weights else 0,
    }
    return np.array(weights), metrics

def calc_format_penalty_weights(response_text: List[str]) -> Tuple[np.ndarray, dict]:
    """Score each response for structural format violations.

    Three checks run per sample and the final penalty is their maximum:
      * boxed answer: exactly one ``\\boxed{...}`` expected (else 1.0);
      * think tags: unbalanced/mismatched ``<think>`` pairs, or a tag
        presence inconsistent with the assistant-turn count, give 2.0;
        a ``<tool_call>`` inside or before a ``<think>`` block gives 1.0;
      * answer tags: missing/unbalanced ``<answer>`` pairs or zero turns
        give 1.0; otherwise answer repetition is penalized proportionally
        to the number of assistant turns.

    Returns:
        (penalty array of len(response_text), metrics dict with
        answer/boxed distribution counts and mean penalties).
    """
    def check_boxed_answer_format(text: str) -> Tuple[float, int]:
        # Exactly one \boxed{...} is the expected final-answer form.
        boxed_pattern = r"\\boxed{.*?}"
        matches = re.findall(boxed_pattern, text, re.DOTALL)
        num_answers = len(matches)
        penalty = 0.0 if num_answers == 1 else 1.0
        return penalty, num_answers

    def completion_of_think_tag(text: str, think_tags=["<think>", "</think>"], tool_tags=["<tool_call>", "</tool_call>"],
                                turn_pattern=r'<\|im_start\|>assistant.*?<\|im_end\|>') -> float:
        has_start_tag, has_end_tag = think_tags[0] in text, think_tags[1] in text
        num_turns = len(re.findall(turn_pattern, text, re.DOTALL))
        if has_start_tag != has_end_tag: return 2.0
        if has_start_tag and has_end_tag and text.count(think_tags[0]) != text.count(think_tags[1]): return 2.0
        # NOTE(review): this compares a bool against the turn count, so any
        # multi-turn trace (num_turns > 1) containing think tags is penalized;
        # confirm whether `text.count(think_tags[0]) != num_turns` was intended.
        if has_start_tag != num_turns: return 2.0
        # A tool call inside a think block is a format violation.
        think_contents = re.findall(r'<think>(.*?)</think>', text, re.DOTALL)
        for content in think_contents:
            if any(tool_tag in content for tool_tag in tool_tags):
                return 1.0
        # A tool call appearing before the think tag within a turn is penalized too.
        turns = re.findall(turn_pattern, text, re.DOTALL)
        for turn in turns:
            think_pos = turn.find(think_tags[0]) if has_start_tag else -1
            tool_pos = turn.find(tool_tags[0]) if any(tag in turn for tag in tool_tags) else -1
            if tool_pos != -1 and (think_pos == -1 or tool_pos < think_pos):
                return 1.0
        return 0.0

    def answer_tag_repetition(text: str, answer_tags=["<answer>", "</answer>"], answer_pattern=r'<answer>.*?</answer>',
                              turn_pattern=r'<\|im_start\|>assistant.*?<\|im_end\|>') -> Tuple[float, int]:
        # Missing either tag entirely is a hard violation.
        if any(ans_tag not in text for ans_tag in answer_tags):
            return 1.0, 0

        # Open/close counts must all equal the number of closed pairs.
        answer_tags_count = [text.count(ans_tag) for ans_tag in answer_tags]
        closed_ans_tag_count = len(re.findall(answer_pattern, text, re.DOTALL))
        if any(ans_tag_count != closed_ans_tag_count for ans_tag_count in answer_tags_count):
            return 1.0, closed_ans_tag_count

        matches = re.findall(turn_pattern, text, re.DOTALL)
        num_turns = len(matches)
        if num_turns == 0:
            return 1.0, closed_ans_tag_count

        # One answer is free; extra answers are penalized per turn, capped at 1.0.
        penalty_weight = min((closed_ans_tag_count - 1) / num_turns, 1.0)
        return penalty_weight, closed_ans_tag_count

    penalty_weights, think_penaltys, ans_penaltys, boxed_penaltys = [], [], [], []
    total_ans_count = zero_ans_count = one_ans_count = gt_one_ans_count = 0
    total_boxed_count = zero_boxed_count = one_boxed_count = gt_one_boxed_count = 0
    for text in response_text:
        boxed_penalty, box_count = check_boxed_answer_format(text)
        ans_penalty, ans_count = answer_tag_repetition(text)
        think_penalty = completion_of_think_tag(text)
        think_penaltys.append(think_penalty)
        ans_penaltys.append(ans_penalty)
        boxed_penaltys.append(boxed_penalty)
        # The final per-sample penalty is the worst of the three checks.
        penalty_weights.append(max(boxed_penalty, think_penalty, ans_penalty))
        total_ans_count += ans_count
        if ans_count == 0: zero_ans_count += 1
        elif ans_count == 1: one_ans_count += 1
        else: gt_one_ans_count += 1
        if box_count == 0: zero_boxed_count += 1
        elif box_count == 1: one_boxed_count += 1
        else: gt_one_boxed_count += 1
        total_boxed_count += box_count
    mean_penalty = np.mean(penalty_weights) if penalty_weights else 0.0
    mean_think_penalty = np.mean(think_penaltys) if think_penaltys else 0.0
    mean_answer_penalty = np.mean(ans_penaltys) if ans_penaltys else 0.0
    mean_boxed_penalty = np.mean(boxed_penaltys) if boxed_penaltys else 0.0
    metrics = {
        'roc_answer_format/answer_per_rollout_mean': total_ans_count / len(response_text) if response_text else 0.0,
        'roc_answer_format/zero_answer_count': zero_ans_count,
        'roc_answer_format/one_answer_count': one_ans_count,
        'roc_answer_format/gt_one_answer_count': gt_one_ans_count,
        'roc_answer_format/boxed_per_rollout_mean': total_boxed_count / len(response_text) if response_text else 0.0,
        'roc_answer_format/zero_boxed_count': zero_boxed_count,
        'roc_answer_format/one_boxed_count': one_boxed_count,
        'roc_answer_format/gt_one_boxed_count': gt_one_boxed_count,
        'roc_answer_format/penalty_weight_mean': mean_penalty,
        'roc_answer_format/think_penalty_mean': mean_think_penalty,
        'roc_answer_format/answer_penalty_mean': mean_answer_penalty,
        # FIX: mean_boxed_penalty was computed but never reported.
        'roc_answer_format/boxed_penalty_mean': mean_boxed_penalty,
    }
    return np.array(penalty_weights), metrics

def calc_difficulty_bonus_weights(batch: "DataProto", history_dict: dict, tool_list: List[dict]) -> Tuple[np.ndarray, dict]:
    """Reward samples that successfully used historically hard, rarely-solved tools.

    A tool's difficulty is its historical failure fraction (fail/total); its
    rarity is the historical proportion of successful use. Across a sample's
    successfully-used tools, the maximum difficulty and minimum success
    proportion are combined: difficulty > 0.2 and rarity < 0.15 gives 1.2,
    difficulty > 0.1 and rarity < 0.15 gives 0.6, else 0.0.
    ``batch`` is unused but kept for signature parity with the sibling calc_* helpers.
    """
    fails = history_dict.get("fail_counts", {})
    totals = history_dict.get("total_counts", {})
    success_props = history_dict.get("proportion_of_success", {})
    difficulty_by_tool = {
        name: (fails.get(name, 0) / total if total > 0 else 0)
        for name, total in totals.items()
    }

    weights: List[float] = []
    for sample_tools in tool_list:
        hardest, min_rarity = 0.0, 1.0
        for name, counts in sample_tools.items():
            if counts.get("success", 0) <= 0:
                continue
            hardest = max(hardest, difficulty_by_tool.get(name, 0.0))
            min_rarity = min(min_rarity, success_props.get(name, 1.0))
        if hardest > 0.2 and min_rarity < 0.15:
            weights.append(1.2)
        elif hardest > 0.1 and min_rarity < 0.15:
            weights.append(0.6)
        else:
            weights.append(0.0)

    arr = np.array(weights)
    metrics = {
        'difficulty_reward/mean': np.mean(arr) if weights else 0.0,
        'difficulty_reward/max': np.max(arr) if weights else 0.0,
    }
    return arr, metrics

def calc_rare_tool_bonus_weights(batch: "DataProto", history_dict: dict, tool_list: List[dict]) -> Tuple[np.ndarray, dict]:
    """Bonus for successfully exercising tools that historically succeed rarely.

    Uses the smallest historical success proportion among the sample's
    successfully-used tools: below half the rare threshold (0.05) gives 1.8,
    below the threshold (0.1) gives 1, otherwise 0.0. ``batch`` is unused but
    kept for signature parity with the sibling calc_* helpers.
    """
    success_props = history_dict.get("proportion_of_success", {})
    RARE_THRESHOLD = 0.1

    def _bonus(sample_tools: dict):
        lowest = min(
            (success_props.get(name, 1.0)
             for name, counts in sample_tools.items()
             if counts.get("success", 0) > 0),
            default=1.0,
        )
        if lowest < RARE_THRESHOLD / 2:
            return 1.8
        if lowest < RARE_THRESHOLD:
            return 1
        return 0.0

    weights = [_bonus(sample_tools) for sample_tools in tool_list]
    arr = np.array(weights)
    metrics = {
        'rare_reward/mean': np.mean(arr) if weights else 0.0,
        'rare_reward/max': np.max(arr) if weights else 0.0,
    }
    return arr, metrics

def calc_over_tool_penalty_weights(batch: "DataProto", history_dict: dict, tool_list: List[dict]) -> Tuple[np.ndarray, dict]:
    """Penalize samples whose successful tools are already heavily exercised.

    If any successfully-used tool has a historical success proportion above
    0.4, the sample receives -1.0; otherwise 0.0. ``batch`` is unused but
    kept for signature parity with the sibling calc_* helpers.
    """
    success_props = history_dict.get("proportion_of_success", {})
    OVER_TOOL_THRESHOLD = 0.4

    weights: List[float] = []
    for sample_tools in tool_list:
        highest = max(
            (success_props.get(name, 0.0)
             for name, counts in sample_tools.items()
             if counts.get("success", 0) > 0),
            default=0.0,
        )
        weights.append(-1.0 if highest > OVER_TOOL_THRESHOLD else 0.0)

    arr = np.array(weights)
    metrics = {
        'over_tool_penalty/mean': np.mean(arr) if weights else 0.0,
        'over_tool_penalty/max': np.max(arr) if weights else 0.0,
    }
    return arr, metrics

def calc_position_bonus_weights(extra_info_dict: dict, batch_size: int, history_dict: dict) -> Tuple[np.ndarray, dict]:
    """Bonus for rollouts that touched any non-default image id.

    Active only while the historical proportion of non-zero image-id usage is
    below 0.2; once that proportion is reached the bonus is disabled entirely
    (all zeros, metric ``position_bonus/active`` = 0.0). While active, a
    sample earns 1.2 if any key of its image-id usage counts differs from '0'.
    """
    POSITION_THRESHOLD, POSITION_BONUS_VALUE = 0.2, 1.2
    if history_dict.get("proportion_of_image_id_not_0", 0.0) >= POSITION_THRESHOLD:
        # Incentive no longer needed once non-zero ids are common enough.
        return np.zeros(batch_size), {'position_bonus/mean': 0.0, 'position_bonus/active': 0.0}

    usage_counts = extra_info_dict.get("image_id_usage_counts", [{} for _ in range(batch_size)])
    weights = [
        POSITION_BONUS_VALUE if any(str(img_id) != '0' for img_id in (counts or {}).keys()) else 0.0
        for counts in usage_counts
    ]
    arr = np.array(weights)
    metrics = {
        'position_bonus/mean': np.mean(arr) if weights else 0.0,
        'position_bonus/active': 1.0,
    }
    return arr, metrics


def resample_of_correct(batch: DataProto, extra_info_dict: dict, tokenizer: PreTrainedTokenizerFast, config: dict,
                        do_sample=True, world_size=None, history_dict=None):
    """Apply reward-shaping penalties/bonuses and down-sample traces per prompt uid.

    Computes a per-trace penalty (error-ratio + format + over-tool penalties,
    minus difficulty/rarity/position bonuses), then for every unique ``uid``
    keeps at most ``config["down_sample_to_n"]`` traces, preferring low-penalty
    non-zero-reward traces while approximately preserving the group's original
    zero/non-zero-reward ratio (subject to the configured per-class minimums).
    Finally truncates the kept batch to a multiple of ``world_size``.

    Returns:
        (new_batch, new_extra_info_dict, metrics) — the first two are None
        when no trace survives sampling or world-size alignment.
    """

    # --- Configuration (required keys; missing ones raise KeyError). ---
    roc_error_ratio = config["roc_error_ratio"]
    roc_answer_format = config["roc_answer_format"]
    min_zero_reward_trace_num = config["min_zero_reward_trace_num"]
    min_non_zero_reward_trace_num = config["min_non_zero_reward_trace_num"]
    down_sample_to_n = config["down_sample_to_n"]
    if down_sample_to_n > 0:
        # The per-class minimums must fit inside the down-sampling budget.
        assert not (min_zero_reward_trace_num + min_non_zero_reward_trace_num > down_sample_to_n), \
            f"Invalid config: {min_zero_reward_trace_num=}, {min_non_zero_reward_trace_num=}, {down_sample_to_n=}"

    _, response_text = decode_prompt_response_str(batch, tokenizer)
    penalty_weights = np.zeros(len(response_text))
    metrics = {}
    
    tool_list = get_tool_list(extra_info_dict, len(batch))
    
    # --- Bonuses (subtracted from the penalty total below). ---
    difficulty_bonus, difficulty_metrics = calc_difficulty_bonus_weights(batch, history_dict, tool_list)
    metrics.update({f"bonus/{k}": v for k, v in difficulty_metrics.items()})
    
    rare_bonus, rare_metrics = calc_rare_tool_bonus_weights(batch, history_dict, tool_list)
    metrics.update({f"bonus/{k}": v for k, v in rare_metrics.items()})
    
    position_bonus, position_metrics = calc_position_bonus_weights(extra_info_dict, len(batch), history_dict)
    metrics.update({f"bonus/{k}": v for k, v in position_metrics.items()})
    
    total_bonus = difficulty_bonus + rare_bonus + position_bonus
    
    # --- Penalties: metrics are always logged; weights applied per config flags. ---
    _penalty_weights, _metrics = calc_error_ratio_penalty_weights(response_text)
    metrics.update(_metrics)
    if roc_error_ratio: penalty_weights += _penalty_weights
        
    _penalty_weights, _metrics = calc_format_penalty_weights(response_text)
    metrics.update(_metrics)
    if roc_answer_format: penalty_weights += _penalty_weights
    
    # Over-tool penalty is applied unconditionally (no config gate).
    _penalty_weights, _metrics = calc_over_tool_penalty_weights(batch, history_dict, tool_list)
    metrics.update(_metrics)
    penalty_weights += _penalty_weights
        
    # Lower is better: bonuses reduce the effective penalty.
    penalty_weights -= total_bonus

    if do_sample and down_sample_to_n > 0:
        uids = batch.non_tensor_batch['uid']
        unique_uids = np.unique(uids)
        final_indices_to_keep = []

        for uid in unique_uids:
            indices = np.where(uids == uid)[0]
            if len(indices) <= down_sample_to_n:
                # Group already within budget — keep everything.
                final_indices_to_keep.extend(indices.tolist())
                continue
            
            uid_mask = (uids == uid)
            # Per-trace scalar reward: sum over the trailing token dimension.
            # NOTE(review): assumes token_level_scores is (batch, seq) — confirm.
            uid_rewards = batch.batch['token_level_scores'][uid_mask].sum(-1)
            uid_penalties = penalty_weights[uid_mask]
            
            # Partition the group's traces by reward sign as (index, penalty) pairs.
            zero_reward_pairs = [(idx, pen) for idx, rwd, pen in zip(indices, uid_rewards, uid_penalties) if rwd <= 0]
            non_zero_reward_pairs = [(idx, pen) for idx, rwd, pen in zip(indices, uid_rewards, uid_penalties) if rwd > 0]
            
            non_zero_reward_pairs.sort(key=lambda x: x[1]) # Sort by penalty (lower is better)
            
            # Proportional sampling based on the group's original reward distribution.
            target_zero_num = round(len(zero_reward_pairs) * down_sample_to_n / len(indices))
            target_non_zero_num = down_sample_to_n - target_zero_num
            
            # Boost each class up to its configured minimum (capped by availability).
            actual_zero_num = max(target_zero_num, min(min_zero_reward_trace_num, len(zero_reward_pairs)))
            actual_non_zero_num = max(target_non_zero_num, min(min_non_zero_reward_trace_num, len(non_zero_reward_pairs)))

            # Ensure we don't exceed the total limit
            if actual_zero_num + actual_non_zero_num > down_sample_to_n:
                 if actual_zero_num > target_zero_num: # reduce zero if it was boosted
                     actual_zero_num = down_sample_to_n - actual_non_zero_num
                 else: # reduce non-zero
                     actual_non_zero_num = down_sample_to_n - actual_zero_num
            
            actual_zero_num = min(actual_zero_num, len(zero_reward_pairs))
            actual_non_zero_num = min(actual_non_zero_num, len(non_zero_reward_pairs))

            # Take the lowest-penalty non-zero traces; zero-reward traces keep input order.
            choices = [p[0] for p in non_zero_reward_pairs[:actual_non_zero_num]] + \
                      [p[0] for p in zero_reward_pairs[:actual_zero_num]]
            final_indices_to_keep.extend(choices)

        if not final_indices_to_keep:
            return None, None, metrics
        
        masked_extra_info_dict = _mask_dict_of_lists(extra_info_dict, final_indices_to_keep)

        try:
            # Slicing with sorted indices ensures the new batch order is predictable
            new_batch = batch[sorted(final_indices_to_keep)]
        except (ValueError, IndexError) as e:
            # Fallback: rebuild the DataProto item-by-item when fancy indexing fails.
            print(f"Warning: DataProto slicing failed with '{e}'. Falling back to robust manual reconstruction.")
            all_items = list(batch)
            kept_items = [all_items[i] for i in sorted(final_indices_to_keep)]
            if not kept_items:
                return None, None, metrics

            new_tensor_batch_dict = {key: [] for key in batch.batch.keys()}
            new_non_tensor_batch_dict = {key: [] for key in batch.non_tensor_batch.keys()}

            for item in kept_items:
                for key, tensor in item.batch.items():
                    new_tensor_batch_dict[key].append(tensor)
                for key, value in item.non_tensor_batch.items():
                    new_non_tensor_batch_dict[key].append(value)
            
            final_tensors = {key: torch.stack(val) for key, val in new_tensor_batch_dict.items()}
            # _create_1d_object_array keeps heterogeneous entries as a 1-D object array.
            final_non_tensors = {key: _create_1d_object_array(val) for key, val in new_non_tensor_batch_dict.items()}

            new_batch = DataProto(
                batch=TensorDict(final_tensors, batch_size=len(kept_items)),
                non_tensor_batch=final_non_tensors,
                meta_info=batch.meta_info
            )

        # Align the kept batch to a multiple of world_size by truncating the tail.
        if world_size is not None and world_size > 0 and len(new_batch) > 0:
            max_batch_size = (len(new_batch) // world_size) * world_size
            if max_batch_size == 0: 
                return None, None, metrics
            final_alignment_indices = list(range(max_batch_size))
            masked_extra_info_dict = _mask_dict_of_lists(masked_extra_info_dict, final_alignment_indices)
            new_batch = new_batch[:max_batch_size]

        return new_batch, masked_extra_info_dict, metrics

    # Sampling disabled: pass everything through, but keep the computed metrics.
    return batch, extra_info_dict, metrics