import os

import torch.utils.cpp_extension as thcpp
from icecream import ic
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension

# Target NVIDIA architectures: sm_86 (Ampere) and sm_120 (Blackwell).
os.environ["TORCH_CUDA_ARCH_LIST"] = "8.6;12.0"
# Windows-only: tell distutils to trust the MSVC environment (vcvars) instead
# of probing for its own compiler setup.
os.environ["DISTUTILS_USE_SDK"] = "1"

# Pre-build toolchain sanity checks, printed via icecream for quick inspection.
_compiler = "cl"  # MSVC driver; matches the /std:c++17 flag passed below
ic(thcpp.is_ninja_available())
ic(thcpp.get_compiler_abi_compatibility_and_version(_compiler))
ic(thcpp.include_paths(device_type="cpu"))
# BUG FIX: include_paths() only recognizes "cpu" and "cuda". The previous
# device_type="gpu" silently fell through to the CPU-only include list, so this
# diagnostic never actually showed the CUDA include directories.
ic(thcpp.include_paths(device_type="cuda"))

# Extension build configuration, hoisted into named constants for readability.
_EXT_NAME = "my_layer"
_SOURCES = ["my_layer.cpp", "my_gemv_kernel.cu"]
# MSVC spells the C++17 switch as /std:c++17; nvcc additionally enables
# fast-math intrinsics. (Linux/GCC equivalent would be
# {"cxx": ["-O3"], "nvcc": ["-O3", "--use_fast_math"]}.)
_COMPILE_ARGS = {"cxx": ["/std:c++17"], "nvcc": ["--use_fast_math"]}

setup(
    name=_EXT_NAME,
    version="0.0.1",
    ext_modules=[
        CUDAExtension(
            name=_EXT_NAME,
            sources=_SOURCES,
            extra_compile_args=_COMPILE_ARGS,
        ),
    ],
    # no_python_abi_suffix=True drops the CPython ABI tag from the built
    # artifact's filename (e.g. my_layer.pyd instead of my_layer.cp311-....pyd).
    cmdclass={"build_ext": BuildExtension.with_options(no_python_abi_suffix=True)},
)
