import functools

import torch
import torch._dynamo.config
import torch._functorch.config
import torch._functorch.partitioners as partitioners
import torch._inductor.config
from torch.nn.attention import SDPBackend, sdpa_kernel

torch.set_float32_matmul_precision("high")


def config_torch_compile(optimize=False):
    if optimize:
        # torch._dynamo.config.cache_size_limit = 128
        torch._functorch.config.activation_memory_budget = 0.95
        torch._dynamo.config.compiled_autograd = True
        # torch._inductor.config.max_fusion_size = 128
        # torch._inductor.config.permute_fusion = "1"
        # torch._inductor.config.autotune_multi_device = "1"
        torch._inductor.config.compile_threads = 32


def use_spda_da2(func):
    def _inner(*args, **kwargs):
        with sdpa_kernel(
            [SDPBackend.CUDNN_ATTENTION, SDPBackend.FLASH_ATTENTION], set_priority=True
        ):
            ret = func(*args, **kwargs)
        return ret

    return _inner


# config_torch_compile(True)
