# In .../utils/down_sample/reject_sample.py

import numpy as np
import torch
from verl.protocol import DataProto
from tensordict import TensorDict # Import TensorDict

def _create_1d_object_array(data_list: list) -> np.ndarray:
    """
    Robustly creates a 1D numpy array with dtype=object, preventing numpy
    from creating multi-dimensional arrays from lists of lists/dicts.
    """
    if not data_list:
        return np.array([], dtype=object)
    
    new_array = np.empty(len(data_list), dtype=object)
    for i, item in enumerate(data_list):
        new_array[i] = item
    return new_array

def _mask_dict_of_lists(data_dict: dict, indices_to_keep: list) -> dict:
    """Filters each list in the dictionary based on the provided indices."""
    if not data_dict:
        return {}
    
    masked_dict = {}
    # Sorting is important to keep the order consistent with the sliced DataProto
    sorted_indices = sorted(indices_to_keep)
    
    for key, value_list in data_dict.items():
        # Ensure value_list is a sequence that can be indexed
        if not isinstance(value_list, (list, np.ndarray)):
            print(f"Warning: Value for key '{key}' is not a list or array. Skipping masking for this key.")
            masked_dict[key] = value_list
            continue

        # Basic check to avoid IndexError
        if sorted_indices and (len(value_list) <= max(sorted_indices)):
            print(f"Warning: List for key '{key}' (len: {len(value_list)}) is shorter than required by indices. Skipping masking for this key.")
            masked_dict[key] = value_list
            continue
        
        masked_dict[key] = [value_list[i] for i in sorted_indices]
        
    return masked_dict

def reject_equal_reward(batch: DataProto, extra_info_dict: dict, do_sample=True, world_size=None):
    """
    Filter out prompt groups (uids) whose rollouts all received identical
    rewards — such groups provide no contrast signal for group-relative
    training, so dropping them is a rejection-sampling step.

    Args:
        batch: DataProto with 'uid' in non_tensor_batch and
            'token_level_scores' in the tensor batch (one row per rollout).
        extra_info_dict: dict of per-sample lists kept index-aligned with
            `batch`; it is masked with the same surviving indices.
        do_sample: when False, filtering is disabled and the inputs are
            returned unchanged (with empty metrics).
        world_size: when set, the surviving batch is truncated to a multiple
            of this value so it can be split evenly across ranks.

    Returns:
        Tuple of (filtered batch, filtered extra_info_dict, metrics dict).
        Returns (None, None, metrics) when nothing survives filtering or
        world-size alignment.
    """
    if not do_sample:
        return batch, extra_info_dict, {}

    uids = batch.non_tensor_batch['uid']
    unique_uids = np.unique(uids)
    # Row indices (into the original batch) of samples we keep.
    indices_to_keep = []
    
    # Counters for metrics: groups with all-equal rewards, split by whether
    # the shared reward is zero (all-fail) or nonzero (all-same-success).
    solve_equal, solve_equal_zeros, solve_equal_non_all_zeros = 0, 0, 0

    for uid in unique_uids:
        indices_for_uid = np.where(uids == uid)[0]
        uid_mask = (uids == uid)
        # Per-rollout scalar reward: sum token-level scores over the sequence.
        uid_rewards = batch.batch['token_level_scores'][uid_mask].sum(-1)

        # Drop the group only when it has >1 rollout and every rollout got
        # the same reward; singleton groups are always kept.
        if len(uid_rewards) > 1 and torch.allclose(uid_rewards[0], uid_rewards):
            solve_equal += 1
            if torch.allclose(torch.zeros_like(uid_rewards), uid_rewards):
                solve_equal_zeros += 1
            else:
                solve_equal_non_all_zeros += 1
        else:
            indices_to_keep.extend(indices_for_uid.tolist())

    metrics = {
        'reject_equal_reward/solve_non_equal_total': len(unique_uids) - solve_equal,
        'reject_equal_reward/solve_equal_total': solve_equal,
        'reject_equal_reward/solve_equal_total_ratio': solve_equal / len(unique_uids) if len(unique_uids) > 0 else 0,
        'reject_equal_reward/solve_equal_zeros': solve_equal_zeros,
        'reject_equal_reward/solve_equal_non_all_zeros': solve_equal_non_all_zeros,
    }

    # Every group was rejected — signal the caller to skip this batch.
    if not indices_to_keep:
        return None, None, metrics
        
    # 1. Mask the extra_info_dict first, using sorted indices for consistency
    masked_extra_info_dict = _mask_dict_of_lists(extra_info_dict, indices_to_keep)
    
    # 2. Slice the batch using its API or a robust fallback
    try:
        new_batch = batch[sorted(indices_to_keep)]
    except (ValueError, IndexError) as e:
        # Fallback: rebuild the DataProto item-by-item. NOTE(review): this
        # assumes iterating `batch` yields per-sample items exposing .batch
        # (tensors) and .non_tensor_batch (objects) — verify against the
        # DataProto implementation.
        print(f"Warning: DataProto slicing failed with '{e}'. Falling back to robust manual reconstruction.")
        all_items = list(batch)
        kept_items = [all_items[i] for i in sorted(indices_to_keep)]

        if not kept_items:
            return None, None, metrics
            
        new_tensor_batch_dict = {key: [] for key in batch.batch.keys()}
        new_non_tensor_batch_dict = {key: [] for key in batch.non_tensor_batch.keys()}

        # Collect per-item tensors / objects column-wise so they can be
        # re-stacked into batch form below.
        for item in kept_items:
            for key, tensor in item.batch.items():
                new_tensor_batch_dict[key].append(tensor)
            for key, value in item.non_tensor_batch.items():
                new_non_tensor_batch_dict[key].append(value)
        
        final_tensors = {key: torch.stack(val) for key, val in new_tensor_batch_dict.items()}
        # FIX: Use the robust array creation helper function
        final_non_tensors = {key: _create_1d_object_array(val) for key, val in new_non_tensor_batch_dict.items()}

        new_batch = DataProto(
            batch=TensorDict(final_tensors, batch_size=len(kept_items)),
            non_tensor_batch=final_non_tensors,
            meta_info=batch.meta_info
        )
        
    # 3. Final alignment to world size
    if world_size is not None and world_size > 0 and len(new_batch) > 0:
        # Largest batch size that divides evenly across `world_size` ranks.
        max_batch_size = (len(new_batch) // world_size) * world_size
        if max_batch_size == 0:
            return None, None, metrics
        
        final_alignment_indices = list(range(max_batch_size))
        # This re-masks the already masked dict and batch
        masked_extra_info_dict = _mask_dict_of_lists(masked_extra_info_dict, final_alignment_indices)
        new_batch = new_batch[:max_batch_size]

    return new_batch, masked_extra_info_dict, metrics