import torch
from collections.abc import Mapping, Sequence


def move_to_device(obj, device):
    """
    Recursively moves all torch.Tensor objects in a nested data structure to the specified device.
    This operation is performed IN-PLACE for mutable containers like dictionaries and lists.
    Immutable containers (e.g., tuples, namedtuples) will be recreated.

    Args:
        obj: The Python object to process. This can be a tensor, a dictionary, a list, a tuple
            (including namedtuples), or any nested combination of these. Any other type is
            returned unchanged.
        device: The target device to move the tensors to (e.g., 'cuda:0', 'cpu').

    Returns:
        The object with all tensors moved to the specified device. For mutable types
        like dicts and lists, this is the same object passed in. For immutable types
        like tuples, a new object of the same type is returned.
    """
    if isinstance(obj, torch.Tensor):
        return obj.to(device)
    if isinstance(obj, dict):
        for k, v in obj.items():
            obj[k] = move_to_device(v, device)
        return obj
    if isinstance(obj, list):
        for i, v in enumerate(obj):
            obj[i] = move_to_device(v, device)
        return obj
    if isinstance(obj, tuple):
        # Namedtuples must be rebuilt via their own constructor; plain tuple(...)
        # would silently discard the subclass and its field names.
        if hasattr(obj, '_fields'):
            return type(obj)(*(move_to_device(x, device) for x in obj))
        return tuple(move_to_device(x, device) for x in obj)
    # Non-container, non-tensor values (ints, strings, None, ...) pass through untouched.
    return obj


def detect_best_attention_implementation():
    """
    Automatically detect the best available attention implementation.

    Returns:
        str: The best available attention implementation ('flash_attention_2', 'flash_attention_1', 'sdpa', or 'eager')

    Priority order:
    1. flash_attention_2 (best performance, lowest memory)
    2. flash_attention_1 (good performance, low memory)
    3. sdpa (good performance, moderate memory)
    4. eager (fallback, highest memory usage)
    """
    try:
        import flash_attn
    except ImportError:
        flash_attn = None

    # The flash-attn package exposes flash_attn_func in both major versions;
    # distinguish v2 from v1 by the package version rather than re-checking
    # the same attribute twice (the original second check was unreachable).
    if flash_attn is not None and hasattr(flash_attn, 'flash_attn_func'):
        version = getattr(flash_attn, '__version__', '')
        if version.startswith('2'):
            print("✓ Flash Attention 2 detected and available")
            return 'flash_attention_2'
        print("✓ Flash Attention 1 detected and available")
        return 'flash_attention_1'

    # Check if PyTorch SDPA is available (PyTorch 2.0+)
    if hasattr(torch.nn.functional, 'scaled_dot_product_attention'):
        print("✓ PyTorch SDPA detected and available")
        return 'sdpa'

    # Fallback to eager implementation
    print("⚠ No optimized attention implementation found, using eager (may be slower)")
    return 'eager'