import copy
from io import StringIO

import numpy as np

from aesara import scalar as aes
from aesara.graph.basic import Apply
from aesara.graph.op import _NoPythonOp
from aesara.graph.utils import MethodNotDefined
from aesara.link.c.interface import HideC
from aesara.scalar import Composite, Scalar
from aesara.scalar.basic import complex_types, upgrade_to_float_no_complex
from aesara.scalar.basic_scipy import Erfcinv, Erfinv
from aesara.tensor.elemwise import CAReduceDtype, DimShuffle, Elemwise


try:
    import pygpu
    from pygpu import gpuarray
    from pygpu.gpuarray import dtype_to_typecode
    from pygpu.reduction import ReductionKernel
    from pygpu.tools import ArrayArg
except ImportError:
    pass

from .basic_ops import GpuKernelBase, Kernel, as_gpuarray_variable, infer_context_name
from .fp16_help import load_w, write_w
from .type import GpuArrayType, gpu_context_type


def make_argument(v, name):
    """Wrap variable *v* as a pygpu ``ArrayArg`` registered under *name*."""
    dtype = np.dtype(v.type.dtype)
    return ArrayArg(dtype, name)


def as_C_string_const(s):
    """Turn a multi-line string into a C string constant, one literal per line.

    Each input line becomes a double-quoted C literal ending in ``\\n`` with
    embedded double quotes escaped; consecutive literals rely on C's
    adjacent-string concatenation to form one constant.
    """
    quoted = []
    for line in s.split("\n"):
        escaped = line.replace('"', '\\"')
        quoted.append(f'"{escaped}\\n"')
    return "\n".join(quoted)


def get_scal(dt):
    """Return the scalar type for dtype *dt*, promoting float16 to float32.

    float16 has no C arithmetic type, so scalar computations are done in
    float32 (libgpuarray handles the load/store conversions).
    """
    target = "float32" if dt == "float16" else dt
    return aes.get_scalar_type(target)


def max_inputs_to_GpuElemwise(node_or_outputs):
    """
    Compute the maximum number of inputs that fit in a kernel call.

    Accepts either an ``Apply`` node or a list of output variables.
    """
    if isinstance(node_or_outputs, Apply):
        outputs = node_or_outputs.outputs
    else:
        outputs = node_or_outputs

    n_out = len(outputs)
    ndim = outputs[0].type.ndim

    # Even with call32, the interface does not change, and shapes,
    # strides, and offset are passed as 64-bits (8 bytes).
    ptr_size = 8
    int_size = 8

    # we take the limit from CUDA for now
    nb_bytes_total = 4096

    # Fixed overhead shared by all arguments: the total number of
    # elements, plus the shape (one int per dimension).
    fixed_size = int_size * (1 + ndim)

    # Per-argument (input or output) cost: one pointer, one offset,
    # and one stride per dimension.  Even if the tensor ends up being
    # contiguous, code for the non-contiguous case still needs to be
    # generated, so the strides are always passed.
    param_size = ptr_size + int_size * (1 + ndim)

    # Bytes remaining for the inputs once the outputs are accounted for.
    nb_bytes_for_inputs = nb_bytes_total - fixed_size - n_out * param_size

    return nb_bytes_for_inputs // param_size


class GpuElemwise(_NoPythonOp, HideC, Elemwise):
    """
    Elemwise on the GPU.

    Wraps a scalar op and executes it element-wise on GPU arrays through
    libgpuarray's ``GpuElemwise`` C interface.  There is no Python
    implementation; all work happens in generated C code.

    """

    # The op's "params" is the GPU context the kernel runs in.
    params_type = gpu_context_type
    # Arity is delegated to the wrapped scalar op.
    nin = property(lambda self: self.scalar_op.nin)
    nout = property(lambda self: self.scalar_op.nout)
    _f16_ok = True

    def __str__(self):
        if self.name is not None:
            return self.name
        items = str(sorted(self.inplace_pattern.items()))
        return f"GpuElemwise{{{self.scalar_op}}}{items}<gpuarray>"

    def max_inputs(self, node_or_outputs):
        # Delegate to the module-level helper that computes how many
        # arguments fit in one kernel call's parameter space.
        return max_inputs_to_GpuElemwise(node_or_outputs)

    def make_node(self, *inputs):
        """Build the Apply node, rejecting graphs we cannot compile.

        Raises
        ------
        NotImplementedError
            If there is more than one output, or more inputs than fit in
            a single kernel call.
        SupportCodeError
            If the scalar op requires C support code that cannot be
            inlined into a GPU kernel.
        """
        ctx_name = infer_context_name(*inputs)
        inputs = [as_gpuarray_variable(i, ctx_name) for i in inputs]
        # get_output_info returns (out_dtypes, out_broadcastables,
        # possibly-dimshuffled inputs).
        out_info = Elemwise.get_output_info(self, GpuDimShuffle, *inputs)
        inputs = out_info[2]
        outputs = [
            GpuArrayType(broadcastable=br, context_name=ctx_name, dtype=dtype)()
            for dtype, br in zip(out_info[0], out_info[1])
        ]
        if len(outputs) > 1:
            raise NotImplementedError()

        if len(inputs) > max_inputs_to_GpuElemwise(outputs):
            raise NotImplementedError(
                "Can not make this GpuElemwise with that much inputs"
            )

        # Try to generate the kernel to catch SupportCodeErrors
        scal_ins = [get_scal(i.dtype) for i in inputs]
        fake_node = self.scalar_op.make_node(*[i() for i in scal_ins])
        try:
            # Per-apply support code cannot be used inside a GPU kernel.
            code = fake_node.op.c_support_code_apply(fake_node, "test")
            if code:
                raise SupportCodeError(code)
        except MethodNotDefined:
            pass
        try:
            support_code = fake_node.op.c_support_code()
            if "struct" in support_code:
                # The macro is fine, the C++ struct is not.
                raise SupportCodeError(
                    "struct aren't supported in GpuElemwise support_code" + support_code
                )
        except MethodNotDefined:
            pass

        node = Apply(self, inputs, outputs)
        return node

    def get_params(self, node):
        # The GPU context comes from the first input's type.
        return node.inputs[0].type.context

    def _get_vnames(self, node):
        """Return C argument names: inputs ``i<n>``, outputs ``o<n>``.

        An output computed in place reuses the name of the input it
        overwrites, so both map to the same kernel argument.
        """
        inps = [f"i{n}" for n, _ in enumerate(node.inputs)]
        outs = [
            f"o{n}" if n not in self.inplace_pattern else inps[self.inplace_pattern[n]]
            for n, _ in enumerate(node.outputs)
        ]
        return inps, outs

    def _generate_op_string(self, node):
        """Generate the kernel operation string and its support code.

        Returns
        -------
        tuple
            ``(support_code, kop)`` -- two C source strings suitable for
            ``GpuElemwise_new``.
        """
        inps, outs = self._get_vnames(node)
        scal_v_ins = [get_scal(i.dtype)() for i in node.inputs]

        # As float16 isn't a c type and most GPU don't compute on it,
        # We convert the computation to float32, and let libgpuarray
        # load in float16 and cast to float32 and do the reverse for
        # the output.
        scalar_op = self.scalar_op
        if isinstance(scalar_op, (aes.Cast, Composite)):
            scalar_op = scalar_op.clone_float32()
        fake_node = scalar_op.make_node(*scal_v_ins)
        scal_v_out = fake_node.outputs
        assert len(scal_v_out) == len(node.outputs)

        try:
            kop = fake_node.op.c_code(
                fake_node, "elem_scalar", inps, outs, dict(fail="return;")
            )
        except MethodNotDefined:
            raise AssertionError(
                "No c code for this scalar. Can not make a GpuElemwise"
            )
        # If the following assert fail, then we need to update the
        # code handler above.
        assert "npy_float16" not in kop

        support_code = ""
        try:
            # We accept only some c_support_code().
            # This filter is done in the make_node()
            support_code += fake_node.op.c_support_code()
        except MethodNotDefined:
            pass
        # Rewrite numpy C type names into their libgpuarray equivalents:
        # the kernel is compiled without the numpy headers.
        for npy, ga in [
            ("npy_bool", "ga_bool"),
            ("npy_uint8", "ga_ubyte"),
            ("npy_uint16", "ga_ushort"),
            ("npy_uint32", "ga_uint"),
            ("npy_uint64", "ga_ulong"),
            ("npy_int8", "ga_byte"),
            ("npy_int16", "ga_short"),
            ("npy_int32", "ga_int"),
            ("npy_int64", "ga_long"),
            ("npy_float16", "ga_half"),
            ("npy_float32", "ga_float"),
            ("npy_float64", "ga_double"),
        ]:
            kop = kop.replace(npy, ga)
        return support_code, kop

    def c_headers(self, **kwargs):
        return ["<numpy_compat.h>", "<gpuarray/types.h>", "<gpuarray/elemwise.h>"]

    def c_support_code_struct(self, node, name):
        # One GpuElemwise object is kept per apply, built in
        # c_init_code_struct and released in c_cleanup_code_struct.
        return "\nGpuElemwise *ge;\n"

    def c_init_code_struct(self, node, name, sub):
        """Emit C code that builds the ``GpuElemwise`` object ``ge`` once.

        Describes every kernel argument (name, typecode, read/write
        flags) then calls ``GpuElemwise_new``.
        """
        inps, outs = self._get_vnames(node)
        # In-place outputs share their input's argument slot.
        nargs = len(inps) + len(outs) - len(self.inplace_pattern)
        support_code, kop = self._generate_op_string(node)
        res = """
        gpuelemwise_arg args[%(nargs)s] = {{0}};
        """ % dict(
            nargs=nargs
        )

        # NOTE: this loop rebinds the `name` parameter; the parameter is
        # not used afterwards in this method.
        for n, (i, name) in enumerate(zip(node.inputs, inps)):
            res += """
            args[%(n)s].name = %(name)s;
            args[%(n)s].typecode = %(typecode)s;
            args[%(n)s].flags = GE_READ;
            """ % dict(
                n=n, name='"{}"'.format(name), typecode=i.type.typecode
            )

        p = len(inps)
        for n, o in enumerate(node.outputs):
            if n in self.inplace_pattern:
                # The argument was already declared as the input; just
                # mark it writable as well.
                assert len(node.outputs) == 1
                res += "\nargs[%(n)s].flags |= GE_WRITE;\n" % dict(
                    n=self.inplace_pattern[n]
                )
            else:
                res += """
                args[%(n)s].name = %(name)s;
                args[%(n)s].typecode = %(typecode)s;
                args[%(n)s].flags = GE_WRITE;
                """ % dict(
                    n=p, name='"{}"'.format(outs[n]), typecode=o.type.typecode
                )
                p += 1

        res += """
        ge = GpuElemwise_new(%(ctx)s->ctx, %(support)s, %(kop)s, %(nargs)s, args, %(nd)s, GE_CONVERT_F16);
        if (ge == NULL) {
           PyErr_SetString(PyExc_RuntimeError, "Could not initialize elemwise support");
           %(fail)s
        }
        """ % dict(
            nargs=nargs,
            ctx=sub["params"],
            fail=sub["fail"],
            support=as_C_string_const(support_code),
            kop=as_C_string_const(kop),
            nd=node.inputs[0].ndim,
        )

        return res

    def c_cleanup_code_struct(self, node, name):
        return """
        GpuElemwise_free(ge);
        """

    def c_code(self, node, name, inputs, outputs, sub):
        """Emit per-call C code: validate shapes, allocate outputs, run ``ge``.

        NOTE: the %-templates below are filled from ``locals()``, so the
        local variable names here are part of the generated C code.
        """
        nd = node.outputs[0].ndim
        fail = sub["fail"]
        initial_dims = ",".join("1" for i in range(nd))
        opname = str(self.scalar_op)
        ctx = sub["params"]
        nargs = len(node.inputs) + len(node.outputs) - len(self.inplace_pattern)

        # check that all inputs have valid dimensions
        emitted_inames = {}
        code = (
            """
        // +1 is so that MSVC is happy when nd == 0
        size_t dims[%(nd)s+1] = {%(initial_dims)s};
        void *rargs[%(nargs)s] = {0};
        int err;
        """
            % locals()
        )
        # Emit one broadcast-pattern array per distinct input name.
        for idx, iname in enumerate(inputs):
            if iname in emitted_inames:
                assert emitted_inames[iname] is node.inputs[idx]
                continue

            broadcasts = map(int, node.inputs[idx].broadcastable)
            broadcasts = ", ".join(map(str, broadcasts))
            nd = node.inputs[idx].ndim
            code += (
                """
            int broadcasts_%(iname)s[%(nd)s+1] = {%(broadcasts)s};
            """
                % locals()
            )
            emitted_inames[iname] = node.inputs[idx]

        # check that all inputs have valid dimensions
        emitted_inames = {}
        for idx, iname in enumerate(inputs):
            code += f"rargs[{idx}] = &{iname}->ga;\n"
            if iname in emitted_inames:
                continue
            code += (
                """
        if (%(nd)s != PyGpuArray_NDIM(%(iname)s))
        {
            PyErr_Format(PyExc_TypeError,
                         "need %(nd)s dims, not %%u",
                         PyGpuArray_NDIM(%(iname)s));
            %(fail)s;
        }
        for (int i = 0; i< %(nd)s; ++i)
        {
            dims[i] = (dims[i] == 1) ? PyGpuArray_DIMS(%(iname)s)[i] : dims[i];
            if ((!(broadcasts_%(iname)s[i] &&
                 PyGpuArray_DIMS(%(iname)s)[i] == 1)) &&
                (dims[i] != PyGpuArray_DIMS(%(iname)s)[i]))
            {
                PyErr_Format(PyExc_ValueError,
                             "GpuElemwise. Input dimension mis-match. Input"
                             " %(idx)d (indices start at 0) has shape[%%d] == %%llu"
                             ", but the output's size on that axis is %%llu.",
                             i,
                             (unsigned long long)PyGpuArray_DIMS(%(iname)s)[i],
                             (unsigned long long)dims[i]
                            );
                %(fail)s;
            }
        }
            """
                % locals()
            )
            emitted_inames[iname] = True
        # check that all outputs have valid dimensions
        p = len(node.inputs)
        for idx, oname in enumerate(outputs):
            typecode = dtype_to_typecode(node.outputs[idx].dtype)
            if idx not in self.inplace_pattern.keys():
                # Reuse a pre-existing output only if its shape matches
                # and it is C-contiguous; otherwise allocate a fresh one.
                code += (
                    """
        for (int i = 0; (i< %(nd)s) && (%(oname)s); ++i) {
            if (dims[i] != PyGpuArray_DIMS(%(oname)s)[i])
            {
                Py_DECREF(%(oname)s);
                %(oname)s = NULL;
            }
        }
        if (%(oname)s && !GpuArray_CHKFLAGS(&(%(oname)s->ga), GA_C_CONTIGUOUS))
        {
            Py_XDECREF(%(oname)s);
            %(oname)s = NULL;
        }
        if (NULL == %(oname)s)
        {
            %(oname)s = pygpu_empty(%(nd)d, dims,
                            %(typecode)s, GA_C_ORDER,
                            %(ctx)s, Py_None);
            if (!%(oname)s) {
                %(fail)s
            }
        }
        rargs[%(p)s] = &%(oname)s->ga;
                """
                    % locals()
                )
                p += 1
            else:
                # In-place output: alias the corresponding input, but
                # still verify that its shape matches the broadcasted
                # output shape.
                input_idx = self.inplace_pattern[idx]
                iname = inputs[input_idx]
                code += (
                    """
        Py_XDECREF(%(oname)s);
        %(oname)s = %(iname)s;
        Py_INCREF(%(oname)s);
        for (int i = 0; (i< %(nd)s) && (%(oname)s); ++i) {
            if (dims[i] != PyGpuArray_DIMS(%(oname)s)[i])
            {
                PyErr_Format(PyExc_ValueError,
                             "GpuElemwise. Output dimension mis-match. Output"
                             " %(idx)d (indices start at 0), working inplace"
                             " on input %(input_idx)s, has shape[%%i] == %%llu"
                             ", but the output's size on that axis is %%llu.",
                             i,
                             (unsigned long long)PyGpuArray_DIMS(%(oname)s)[i],
                             (unsigned long long)dims[i]
                            );
                Py_DECREF(%(oname)s);
                %(oname)s = NULL;
                %(fail)s;
            }
        }
        """
                    % locals()
                )

        code += """
        if (GpuElemwise_call(ge, rargs, GE_BROADCAST) != GA_NO_ERROR) {
          PyErr_SetString(PyExc_RuntimeError, "Error in the elemwise call");
          %(fail)s
        }
        """ % dict(
            fail=sub["fail"]
        )

        return str(code)

    def prepare_node(self, node, storage_map, compute_map, no_recycling, impl=None):
        # Since we don't have a Python implementation, ignore `impl` and use C.
        return super().prepare_node(
            node, storage_map, compute_map, no_recycling, impl="c"
        )

    def c_code_cache_version(self):
        # Combine this op's version with the scalar op's; an empty
        # scalar version disables caching entirely.
        ver = self.scalar_op.c_code_cache_version()
        if ver:
            return (10, ver)
        else:
            return ver


class SupportCodeError(Exception):
    """
    Raised when a scalar op's C support code cannot be used in a GPU kernel.

    We do not support certain things (such as the C++ complex struct).

    """


class GpuDimShuffle(DimShuffle):
    """
    DimShuffle on the GPU.

    """

    _f16_ok = True
    c_func_name = "APPLY_SPECIFIC(gpu_dimshuffle)"

    def make_node(self, input):
        """Build an Apply whose output is a `GpuArrayType` on *input*'s context."""
        ctx_name = infer_context_name(input)
        # Let the base class infer the output dtype/broadcast pattern,
        # then move the result type onto the GPU.
        res = DimShuffle.make_node(self, input)
        base_type = res.outputs[0].type
        otype = GpuArrayType(
            dtype=base_type.dtype,
            broadcastable=base_type.broadcastable,
            context_name=ctx_name,
        )
        input = as_gpuarray_variable(input, ctx_name)
        return Apply(self, [input], [otype()])

    def __str__(self):
        prefix = "InplaceGpuDimShuffle" if self.inplace else "GpuDimShuffle"
        order = ",".join(str(x) for x in self.new_order)
        return f"{prefix}{{{order}}}"

    def perform(self, node, inp, out, params):
        """Apply the dimension shuffle to the single GPU array input."""
        (arr,) = inp
        (out_storage,) = out

        # Move kept axes to their new positions; dropped axes go last so
        # the reshape below discards them.
        shuffled = arr.transpose(self.shuffle + self.drop)

        # Start from the kept dimensions, then splice in the broadcast
        # (length-1) axes at their target positions.
        new_shape = list(shuffled.shape[: len(self.shuffle)])
        for axis in self.augment:
            new_shape.insert(axis, 1)
        result = shuffled.reshape(new_shape)

        if not self.inplace:
            result = result.copy()

        out_storage[0] = result


class GpuCAReduceCuda(GpuKernelBase, HideC, CAReduceDtype, _NoPythonOp):
    """
    GpuCAReduceCuda is a Reduction along some dimensions by a scalar op.

    Parameters
    ----------
    reduce_mask
        The dimensions along which to reduce. The `reduce_mask` is a tuple of
        booleans (actually integers 0 or 1) that specify for each input
        dimension, whether to reduce it (1) or not (0).
    pre_scalar_op
        If present, must be a scalar op with only 1 input. We will execute it
        on the input value before reduction.

    Examples
    --------
    When scalar_op is an `aesara.scalar.basic.Add` instance:

      - reduce_mask == (1,) sums a vector to a scalar

      - reduce_mask == (1,0) computes the sum of each column in a matrix

      - reduce_mask == (0,1) computes the sum of each row in a matrix

      - reduce_mask == (1,1,1) computes the sum of all elements in a 3-tensor.

    Notes
    -----
    Any reduce_mask of all zeros is a sort of 'copy', and may be removed during
    graph optimization.

    This Op is a work in progress.

    This op was recently upgraded from just GpuSum a general CAReduce. Not
    many code cases are supported for scalar_op being anything other than
    scalar.Add instances yet.

    Important note: if you implement new cases for this op, be sure to
    benchmark them and make sure that they actually result in a speedup.
    GPUs are not especially well-suited to reduction operations so it is
    quite possible that the GPU might be slower for some cases.

    """

    __props__ = (
        "axis",
        "reduce_mask",
        "dtype",
        "acc_dtype",
        "scalar_op",
        "pre_scalar_op",
    )
    _f16_ok = True
    verbose = 0

    def __init__(
        self,
        scalar_op,
        axis=None,
        reduce_mask=None,
        dtype=None,
        acc_dtype=None,
        pre_scalar_op=None,
    ):
        """Set up the reduction; see the class docstring for parameter meanings."""
        self.reduce_mask = tuple(reduce_mask) if reduce_mask is not None else None

        # used to make sure that calls to scalar op
        # have unique name arguments
        self._n_scalar_op_calls = 0
        CAReduceDtype.__init__(
            self, scalar_op, axis=axis, dtype=dtype, acc_dtype=acc_dtype
        )
        self.pre_scalar_op = pre_scalar_op
        if pre_scalar_op:
            # Only unary pre-ops are supported (applied element-wise
            # before the reduction).
            assert pre_scalar_op.nin == 1

    def __str__(self):
        """Render e.g. ``GpuCAReduceCuda{pre=Sqr,red=Add}{0, 1}``."""
        parts = ["GpuCAReduceCuda{"]
        if self.pre_scalar_op:
            parts.append(f"pre={self.pre_scalar_op},red=")
        parts.append(str(self.scalar_op))
        parts.append("}")
        if self.axis is not None:
            axes = ", ".join(str(x) for x in self.axis)
            parts.append(f"{{{axes}}}")
        return "".join(parts)

    def __setstate__(self, d):
        """Restore pickled state, filling in attributes missing from old pickles."""
        self.__dict__.update(d)
        # For unpickling of old ops: pickles saved before `pre_scalar_op`
        # existed lack the attribute, so default it to None.
        if not hasattr(self, "pre_scalar_op"):
            self.pre_scalar_op = None

    def make_node(self, x):
        """Build the Apply node, deriving ``reduce_mask`` from ``axis`` if needed.

        Raises
        ------
        TypeError
            If *x* is not on a cuda context, or its rank does not match
            ``reduce_mask``.
        NotImplementedError
            If any dtype involved in the reduction is complex.
        """
        x = as_gpuarray_variable(x, infer_context_name(x))
        if x.type.context.kind != b"cuda":
            raise TypeError("GpuCAReduceCuda doesn't work for non-cuda devices")
        ret = super().make_node(x)
        # Rebind `self` to a shallow copy: the returned node must carry an
        # op whose `axis`/`reduce_mask` match this specific input, without
        # mutating the original op (which may be shared across nodes).
        self = copy.copy(self)
        self.axis = ret.op.axis
        if self.pre_scalar_op:
            # Currently we only tested pre_scalar_op that don't cause
            # upcast.
            assert Elemwise(self.pre_scalar_op)(x).dtype == x.dtype
        if self.reduce_mask is None:
            # Derive the mask from `axis`: 1 marks a reduced dimension.
            if self.axis is None:
                reduce_mask = [1] * x.type.ndim
            else:
                reduce_mask = [0] * x.type.ndim
                for a in self.axis:
                    # Each axis may appear at most once.
                    assert reduce_mask[a] == 0
                    reduce_mask[a] = 1
            self.reduce_mask = tuple(reduce_mask)

        if x.type.ndim != len(self.reduce_mask):
            raise TypeError(f"x must have rank {len(self.reduce_mask)}")
        if (
            "complex" in x.dtype
            or "complex" in ret.outputs[0].dtype
            or "complex" in self._acc_dtype(x.dtype)
        ):
            raise NotImplementedError("We don't support complex in gpu reduction")
        return Apply(
            self,
            [x],
            [
                GpuArrayType(
                    ret.outputs[0].dtype,
                    ret.outputs[0].type.broadcastable,
                    context_name=x.type.context_name,
                )()
            ],
        )

    def supports_c_code(self, inputs):
        """
        Returns True if the current op and reduce pattern has functioning C code.

        """
        # Without a c_code_reduce_<pattern> method there is certainly no
        # C implementation for this reduce pattern.  (This is the test
        # that used to be implemented by local_gpu_sum.)
        pattern = "".join(str(i) for i in self.reduce_mask)
        if not hasattr(self, f"c_code_reduce_{pattern}"):
            return False

        # A pattern method can exist and still not support the current
        # scalar op.  Probe for that by generating the C code with fake
        # arguments and watching for NotImplementedError.
        node = self.make_node(*inputs)

        fake_name = "fake_name"
        fake_inp = [f"fake_input_name_{i}" for i in range(len(inputs))]
        fake_out = [f"fake_output_name_{i}" for i in range(len(node.outputs))]
        fake_sub = {"fail": "fake failure code", "params": "fake context"}

        try:
            self.c_code(node, fake_name, fake_inp, fake_out, fake_sub)
            if not self.gpu_kernels(node, fake_name):
                return False
        except NotImplementedError:
            return False
        return True

    def c_headers(self, **kwargs):
        """Headers required by the generated reduction C code."""
        headers = ["<numpy_compat.h>", "<gpuarray/types.h>"]
        return headers

    def c_support_code(self, **kwargs):
        """Return helper C code: integer ceiling division, used for grid sizing."""
        return """
        template <typename T>
        static T ceil_intdiv(T a, T b)
        {
            return (a/b) + ((a % b) ? 1: 0);
        }
        """

    def c_code(self, node, name, inp, out, sub):
        """Emit C code that validates input, allocates ``z``, and reduces.

        The generated code checks the input rank, (re)allocates the output
        when its shape or contiguity is wrong, short-circuits when the
        input is empty, then dispatches to the ``c_code_reduce_<pattern>``
        helper matching this op's ``reduce_mask``.

        NOTE: the %-templates below are filled from ``locals()``, so the
        local variable names in this method are part of the generated C.
        """
        (x,) = inp
        (z,) = out

        nd_in = node.inputs[0].type.ndim
        nd_out = node.outputs[0].type.ndim
        # For complex, we need to use aesara_complex* in the c code to
        # have it run. But libgpuarray don't understand it.
        in_dtype = node.inputs[0].type.dtype_specs()[1]
        out_dtype = node.outputs[0].type.dtype_specs()[1]
        gin_dtype = "npy_" + node.inputs[0].dtype
        gout_dtype = "npy_" + node.outputs[0].dtype
        # Every reduced dimension removes exactly one axis.
        assert nd_in - nd_out == sum(self.reduce_mask)

        sio = StringIO()
        fail = sub["fail"]
        ctx = sub["params"]

        # check input
        print(
            """
        if (PyGpuArray_NDIM(%(x)s) != %(nd_in)s)
        {
            PyErr_Format(PyExc_TypeError,
                         "required nd=%(nd_in)s, got nd=%%u", PyGpuArray_NDIM(%(x)s));
            %(fail)s;
        }
        """
            % locals(),
            file=sio,
        )

        # It might be nice to use a property of the op class to do this,
        # but tensor.elemwise.CAReduce has this exact same check so I guess
        # this is OK to do
        if self.scalar_op in [aes.scalar_minimum, aes.scalar_maximum]:
            # min/max have no identity element, so an empty reduced axis
            # is an error rather than a fill value.
            conds = [
                f"(PyGpuArray_DIMS({x})[{i}] == 0)"
                for i in range(nd_in)
                if self.reduce_mask[i]
            ]
            assert len(conds) > 0
            cond = "(" + " || ".join(conds) + ")"
            print(
                """
            if %(cond)s
            {
                PyErr_Format(PyExc_ValueError," tried to reduce a 0-length axis.");
                %(fail)s;
            }
            """
                % locals(),
                file=sio,
            )

        #
        # alloc an output if we need one
        #

        # check the basics of out output
        print(
            f"""
        if (  !{z}
           || (PyGpuArray_NDIM({z}) != {nd_out})
        """,
            file=sio,
        )

        # ensure that the output has the right non-reduced dimensions
        j = 0
        for i in range(nd_in):
            if not self.reduce_mask[i]:
                print(
                    " || (PyGpuArray_DIMS(%(z)s)[%(j)s] != PyGpuArray_DIMS(%(x)s)[%(i)d]) "
                    % locals(),
                    file=sio,
                )
                j += 1

        print(
            """
           )
        {
            """
            % locals(),
            file=sio,
        )
        if nd_out > 0:
            print(f"size_t new_dims[{nd_out}]; ", file=sio)
        else:
            # 0-d output: pygpu_empty accepts NULL dims for a scalar.
            print("size_t *new_dims=NULL; ", file=sio)

        j = 0
        for i in range(nd_in):
            if not self.reduce_mask[i]:
                print(
                    f"new_dims[{j}] = PyGpuArray_DIMS({x})[{i}];",
                    file=sio,
                )
                j += 1
        # gout_dtype is "npy_<dtype>"; strip the prefix for libgpuarray.
        out_typecode = dtype_to_typecode(gout_dtype[4:])
        print(
            """
            Py_XDECREF(%(z)s);
            %(z)s = pygpu_empty(%(nd_out)s, new_dims,
                                %(out_typecode)s, GA_C_ORDER,
                                %(ctx)s, Py_None);
            if (NULL == %(z)s)
            {
                PyErr_Format(PyExc_RuntimeError, "Failed to allocate output");
                %(fail)s;
            }
        }
        """
            % locals(),
            file=sio,
        )

        # \begin bracket the reduction in a check that there is
        # actually work to do
        if getattr(self.scalar_op, "identity", None) == 0:
            # An empty input reduces to the identity (0), so memset works.
            zero_shp = f"GpuArray_memset(&{z}->ga, 0)"
        # TODO: elif getattr(self.scalar_op, 'identity', None) == 1:
        else:
            scalar_op = self.scalar_op
            zero_shp = (
                """
            PyErr_Format(PyExc_NotImplementedError,
                         "GpuCAReduceCuda not implemented when input shape is 0"
                         " for this scalar_op: %(scalar_op)s");
            %(fail)s;
            """
                % locals()
            )
        print(
            """
        if (PyGpuArray_SIZE(%(z)s) && ! PyGpuArray_SIZE(%(x)s)){
            %(zero_shp)s;
        }
        else if (PyGpuArray_SIZE(%(z)s))
        {
        """
            % locals(),
            file=sio,
        )

        #
        # Now perform the reduction
        #

        if all(i == 1 for i in self.reduce_mask):
            # check if the tensor is c-contiguous, if true, use the
            # c_code_reduce_ccontig code.
            # TODO: check if we are c-contiguous when we un-dimshuffle
            # TODO: if only some dims are c-contiguous, call version with less dims.
            print("if(%(x)s->ga.flags & GA_C_CONTIGUOUS){" % locals(), file=sio)
            self.c_code_reduce_ccontig(sio, node, name, x, z, fail)
            print("}else{", file=sio)
            getattr(self, f"c_code_reduce_{''.join(str(i) for i in self.reduce_mask)}")(
                sio, node, name, x, z, fail
            )
            print("}", file=sio)
        else:
            getattr(self, f"c_code_reduce_{''.join(str(i) for i in self.reduce_mask)}")(
                sio, node, name, x, z, fail
            )

        # \end bracket the reduction ...
        print(
            """
        }
        """
            % locals(),
            file=sio,
        )

        return sio.getvalue()

    def _makecall(
        self, node, name, x, z, fail, pattern=None, extra_dims=(), extra_strides=()
    ):
        """
        Return a string for making a kernel call.

        The return value looks something like:

            .. code-block:: c

                ssize_t stride_A0 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s);
                ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s);
                ssize_t stride_Z0 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s);
                if (verbose)
                    printf("running kernel_reduce_10_%(name)s\\n");
                size_t n_shared = sizeof(%(acc_dtype)s) * n_threads[0] * n_threads[1] * n_threads[2];
                void *kernel_params[] = {
                        (void *)&PyGpuArray_DIMS(%(x)s)[0],
                        (void *)&PyGpuArray_DIMS(%(x)s)[1],
                        (void *)%(x)s->ga.data,
                        (void *)&%(x)s->ga.offset,
                        (void *)&stride_A0,
                        (void *)&stride_A1,
                        (void *)%(z)s->ga.data,
                        (void *)&%(z)s->ga.offset,
                        (void *)&stride_Z0};
                int err = GpuKernel_call(&%(k_var)s, 3, n_blocks, n_threads, n_shared, kernel_params);
                %(err_check)s

        NOTE: the %-templates below are filled from ``locals()``, so the
        local variable names in this method are part of the generated C.
        """
        in_dtype = "npy_" + node.inputs[0].dtype
        out_dtype = "npy_" + node.outputs[0].dtype
        acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype)
        sio = StringIO()
        if pattern is None:
            pattern = "".join(str(c) for c in self.reduce_mask)
        ndim = len(self.reduce_mask)
        nd_out = ndim - sum(self.reduce_mask)
        shapes_format = f"shape=({','.join(['%llu'] * node.inputs[0].ndim)})"
        shapes_data = ",".join(
            [f"(size_t) PyGpuArray_DIMS({x})[{i}]" for i in range(node.inputs[0].ndim)]
        )
        k_var = f"kernel_reduce_{pattern}_{name}"
        params = []

        # Kernel arguments, in declaration order: input dims (plus any
        # caller-supplied extras), input data/offset/strides, then output
        # data/offset/strides.
        for i in range(ndim):
            params.append(f"(void *)&PyGpuArray_DIMS({x})[{i}]")
        for declaration, value in extra_dims:
            print(declaration % locals(), file=sio)
            params.append(value)
        params.append(f"(void *){x}->ga.data")
        params.append(f"(void *)&{x}->ga.offset")
        for i in range(ndim):
            # Strides are converted from bytes to element counts.
            print(
                """
            ssize_t stride_A%(i)d = PyGpuArray_STRIDES(%(x)s)[%(i)s]/sizeof(%(in_dtype)s);
            """
                % locals(),
                file=sio,
            )
            params.append("(void *)&stride_A%(i)d" % locals())
        for declaration, value in extra_strides:
            print(declaration % locals(), file=sio)
            params.append(value)

        params.append(f"(void *){z}->ga.data")
        params.append(f"(void *)&{z}->ga.offset")
        for i in range(nd_out):
            print(
                """
            ssize_t stride_Z%(i)d = PyGpuArray_STRIDES(%(z)s)[%(i)s]/sizeof(%(out_dtype)s);
            """
                % locals(),
                file=sio,
            )
            params.append("(void *)&stride_Z%(i)d" % locals())
        kernel_params = ", ".join(params)
        err_check = (
            """
            if (err != GA_NO_ERROR) {
                PyErr_Format(PyExc_RuntimeError,
                             "gpuarray error: %(k_var)s: %%s.",
                             GpuKernel_error(&%(k_var)s, err));
                %(fail)s;
            }
        """
            % locals()
        )
        print(
            """
            if (verbose)
                printf("running kernel_reduce_%(pattern)s_%(name)s\\n");
            size_t n_shared = sizeof(%(acc_dtype)s) * n_threads[0] * n_threads[1] * n_threads[2];
            void *kernel_params[] = { %(kernel_params)s };
            if (verbose>1)
                printf("n_threads[0]=%%lu, n_threads[1]=%%lu, "
                       "n_threads[2]=%%lu, n_threads=%%lu, "
                       "n_blocks[0]=%%lu, n_blocks[1]=%%lu, n_blocks[2]=%%lu, "
                       "n_blocks=%%lu, n_shared=%%d, %(shapes_format)s\\n",
                                  n_threads[0],n_threads[1],
                                  n_threads[2],
                                  n_threads[0]*n_threads[1]*
                                  n_threads[2],
                                  n_blocks[0],n_blocks[1],n_blocks[2],
                                  n_blocks[0]*n_blocks[1]*n_blocks[2],
                                  n_shared, %(shapes_data)s);
            int err = GpuKernel_call(&%(k_var)s, 3, n_blocks, n_threads, n_shared, kernel_params);
            %(err_check)s
            """
            % locals(),
            file=sio,
        )

        return sio.getvalue()

    def _k_decl(self, node, nodename, pattern=None, ndim=None, reduce_mask=None):
        """
        Return a string to declare a kernel function.

        The result will look something like this:

        .. code-block:: c

            KERNEL void kernel_reduce_110_%(nodename)s(
                    const ga_size d0,
                    const ga_size d1,
                    const ga_size d2,
                    const %(in_type)s *A,
                    const ga_size offset_A,
                    const ga_ssize sA0,
                    const ga_ssize sA1,
                    const ga_ssize sA2,
                    %(out_type)s * Z,
                    const ga_size offset_Z,
                    const ga_ssize sZ0)

        Since the nodename is unique, we don't need to put the name
        of the scalar_op in here.

        Parameters
        ----------
        node
            The Apply node the kernel is generated for.
        nodename
            Unique string identifying the node; only used to build the
            host-side kernel variable name.
        pattern
            Reduction pattern string such as "010"; derived from
            `reduce_mask` when None.
        ndim
            Number of input dimensions; defaults to ``len(reduce_mask)``.
        reduce_mask
            0/1 flags, one per input dimension; defaults to
            ``self.reduce_mask``.

        Returns
        -------
        tuple
            ``(declaration_source, kernel_name, param_types, kernel_var)``.

        """
        in_dtype = node.inputs[0].dtype
        out_dtype = node.outputs[0].dtype
        in_type = gpuarray.dtype_to_ctype(in_dtype)
        out_type = gpuarray.dtype_to_ctype(out_dtype)
        if reduce_mask is None:
            reduce_mask = self.reduce_mask
        if ndim is None:
            ndim = len(reduce_mask)
        if pattern is None:
            pattern = "".join(str(i) for i in reduce_mask)
        kname = f"kernel_reduce_{pattern}"
        k_var = f"kernel_reduce_{pattern}_{nodename}"
        params = []
        sio = StringIO()

        print(
            f"""
            KERNEL void {kname}(
        """,
            file=sio,
        )
        # One size argument per input dimension.
        for i in range(ndim):
            params.append("uintp")
            print(
                f"""
                    const ga_size d{i},
        """,
                file=sio,
            )
        # Input array pointer plus its byte offset.
        params.append(gpuarray.GpuArray)
        params.append("uintp")
        print(
            f"""
                    const {in_type} *A, const ga_size offset_A,
        """,
            file=sio,
        )
        # One stride argument per input dimension.
        for i in range(ndim):
            params.append("intp")
            print(
                f"""
                    const ga_ssize sA{i},
        """,
                file=sio,
            )
        # Output array pointer plus its byte offset.
        params.append(gpuarray.GpuArray)
        params.append("uintp")
        print(
            f"""
                    {out_type} * Z, const ga_size offset_Z
        """,
            file=sio,
        )
        # One stride argument per non-reduced (output) dimension.
        for i in range(ndim - sum(reduce_mask)):
            params.append("intp")
            print(
                f"""
                    , const ga_ssize sZ{i}
        """,
                file=sio,
            )
        print(")", file=sio)
        return sio.getvalue(), kname, params, k_var

    def _k_init(self, node, nodename):
        """Return C code for the common prologue of a reduction kernel body.

        The emitted snippet computes the flat thread index, declares the
        shared-memory accumulation buffer ``buf`` and applies the byte
        offsets to the input (``A``) and output (``Z``) pointers before
        initializing ``myresult``.
        """
        # We need to use aesara_complex* and not npy_complex*, hence the
        # dtype_to_ctype conversion instead of plain "npy_" + dtype names.
        ctypes = {
            "in_type": gpuarray.dtype_to_ctype(node.inputs[0].dtype),
            "out_type": gpuarray.dtype_to_ctype(node.outputs[0].dtype),
            "acc_type": gpuarray.dtype_to_ctype(
                self._acc_dtype(node.inputs[0].dtype)
            ),
        }
        template = """
                const int threadCount = blockDim.x * blockDim.y * blockDim.z;
                const int threadNum = threadIdx.z * blockDim.x * blockDim.y
                + threadIdx.y * blockDim.x + threadIdx.x;
                extern __shared__ %(acc_type)s buf[];
                A = (const %(in_type)s *)(((char *)A)+offset_A);
                Z = (%(out_type)s *)(((char *)Z)+offset_Z);
                %(acc_type)s myresult = 0;
        """
        return template % ctypes

    def _assign_init(self, first_item, dtype):
        """
        This return the initial value for myresult.
        If the scalar op have an identity value, return it.

        Otherwise, check that the scalar op is maximum or minimum
        and return first_item. It should be the first element of the reduction.
        As the maximum and minimum of the same value don't change, this work.

        """
        if hasattr(self.scalar_op, "identity"):
            return str(self.scalar_op.identity)
        else:
            assert isinstance(self.scalar_op, (aes.ScalarMaximum, aes.ScalarMinimum))
            if self.pre_scalar_op:  # TODO: multiple dtypes
                # dtype = node.inputs[0].dtype

                dummy_var = aes.Scalar(dtype=dtype)()

                dummy_node = self.pre_scalar_op.make_node(dummy_var)

                dummy_name = "assign_init_pre_scalar_op" + str(self._n_scalar_op_calls)
                self._n_scalar_op_calls += 1
                t = self.pre_scalar_op.c_code(
                    dummy_node, dummy_name, (first_item,), ("",), {}
                )
                assert t.startswith(" = ")
                first_item = t[3:]
                if first_item[-1] == ";":
                    first_item = first_item[:-1]

            return first_item

    def _assign_reduce(self, node, name, left, right, sub, pre):
        """

        Parameters
        ----------
        node
            The node argument to this op's c_code.
        name
            The name argument to this op's c_code.
        left
            A C code string identifying an lvalue.
        right
            A C code string identifying an expression.
        sub
            The sub argument to this op's c_code.
        pre
            If True, we will add the pre_scalar_op.c_code.

        Returns
        -------
        str
            C code to reduce left and right, assigning the result to left.

        """

        (x,) = node.inputs
        in_dtype = x.dtype
        out_dtype = node.outputs[0].dtype

        # Dummy scalar variables carrying only the dtypes; used to build a
        # node the scalar op can generate C code from.
        dummy_left = Scalar(dtype=out_dtype)()
        dummy_right = Scalar(dtype=in_dtype)()

        dummy_node = self.scalar_op.make_node(dummy_left, dummy_right)

        # Unique name per generated snippet so that several reductions in
        # the same kernel do not collide.
        dummy_name = name + "_scalar_op" + str(self._n_scalar_op_calls)
        self._n_scalar_op_calls += 1

        if pre and self.pre_scalar_op:
            assert left == "myresult"
            # NOTE(review): dummy_node is rebound to the pre_scalar_op node
            # here and that node is what gets passed to scalar_op.c_code
            # below — presumably only the dtypes matter for codegen; confirm.
            dummy_node = self.pre_scalar_op.make_node(dummy_left)
            dummy_name = name + "_scalar_op" + str(self._n_scalar_op_calls)
            self._n_scalar_op_calls += 1
            t = self.pre_scalar_op.c_code(dummy_node, dummy_name, (right,), ("",), sub)
            # The generated code has the form " = <expr>;" — keep only <expr>.
            assert t.startswith(" = ")
            right = t[3:]
            if right[-1] == ";":
                right = right[:-1]

        return self.scalar_op.c_code(
            dummy_node, dummy_name, (left, right), (left,), sub
        )

    def _k_reduce_buf(self, z_pos, node, name, sub):
        """
        Return C code that reduces the per-thread partial results stored in
        the shared buffer ``buf`` down to one value and writes it to
        ``z_pos``.

        Parameters
        ----------
        z_pos
            C lvalue receiving the final reduced value.
        node, name, sub
            These should be passed through from the original call to c_code.

        Returns
        -------
        str
            CUDA C code: first the leading warp folds all partial sums into
            the first `warpSize` buffer slots, then a halving loop reduces
            those slots, and thread 0 writes the result.

        """
        # Removed previously-unused in_dtype/out_dtype/acc_dtype locals:
        # the template below only interpolates z_pos and write_out.
        write_out = write_w(node.outputs[0].dtype)

        current_version = """
        __syncthreads(); // some kernel do multiple reduction.
        buf[threadNum] = myresult;
        __syncthreads();

        // rest of function is handled by one warp
        if (threadNum < warpSize) {
            //round up all the partial sums into the first `warpSize` elements
            for (int i = threadNum + warpSize; i < threadCount; i += warpSize)
            {
                """
        current_version += (
            self._assign_reduce(node, name, "myresult", "buf[i]", sub, False)
            + """
            }
            buf[threadNum] = myresult;
        }
        __syncthreads();
        for (unsigned int _n = warpSize / 2; _n > 0; _n /= 2) {
            if (threadNum < _n && threadNum + _n < threadCount)
            """
        )
        current_version += self._assign_reduce(
            node, name, "buf[threadNum]", "buf[threadNum+_n]", sub, False
        )

        current_version += """
            __syncthreads();
        }
        if (threadNum == 0) {
          %(z_pos)s = %(write_out)s(buf[0]);
        }
        """

        current_version = current_version % locals()

        return current_version

    def _k_reduce_buf_multiple(self, z_pos, node, name, nb_reduce):
        """
        Return C code reducing ``buf`` when several independent sums share
        one thread block.

        Threads must be organized so that ``threadNum % nb_reduce``
        identifies the threads cooperating on the same sum, with
        ``nb_reduce <= warpSize``.
        """
        fmt = {
            "z_pos": z_pos,
            "nb_reduce": nb_reduce,
            "reduce_fct": self._assign_reduce(
                node, name, "myresult", "buf[i]", {}, False
            ),
            "write_out": write_w(node.outputs[0].dtype),
        }
        return (
            """
        __syncthreads(); // some kernel do multiple reduction.
        buf[threadNum] = myresult;
        __syncthreads();

        // rest of function is handled by one warp
        if (threadNum < %(nb_reduce)s)
        {
            //round up all the partial sums into the first `nb_reduce` elements
            for (int i = threadNum + %(nb_reduce)s; i < threadCount; i += %(nb_reduce)s)
            {
                %(reduce_fct)s;
            }
            %(z_pos)s = %(write_out)s(myresult);
        }
        """
            % fmt
        )

    def c_code_reduce_ccontig(self, sio, node, name, x, z, fail):
        """Emit host C code reducing a C-contiguous input as one flat array.

        Launches a single block of up to 256 threads over all elements.
        """
        verbose = self.verbose
        in_dtype = "npy_" + node.inputs[0].dtype
        out_dtype = "npy_" + node.outputs[0].dtype
        # Empty input: memset the output to 0 when the scalar op's identity
        # is 0; otherwise there is no valid result and we raise.
        if getattr(self.scalar_op, "identity", None) == 0:
            zero_shp = f"GpuArray_memset(&{z}->ga, 0)"
        # TODO: elif getattr(self.scalar_op, 'identity', None) == 1:
        else:
            zero_shp = (
                """
            PyErr_Format(PyExc_NotImplementedError,
                         "GpuCAReduceCuda not implemented when input shape is 0 for this scalar_op");
            %(fail)s;
            """
                % locals()
            )

        acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype)
        k_var = f"kernel_reduce_ccontig_{name}"
        err_check = (
            """
            if (err != GA_NO_ERROR) {
                PyErr_Format(PyExc_RuntimeError,
                             "gpuarray error: %(k_var)s: %%s.",
                             GpuKernel_error(&%(k_var)s, err));
                %(fail)s;
            }
        """
            % locals()
        )

        print(
            """
        {
          if(PyGpuArray_SIZE(%(x)s)==0){
            %(zero_shp)s;
          }else{
            int verbose = %(verbose)s;
            size_t numEls = PyGpuArray_SIZE(%(x)s);
            size_t n_threads = std::min(numEls, (size_t) 256);
            size_t n_blocks = 1;
            void *kernel_params[] = {(void *)&numEls,
                                     (void *)%(x)s->ga.data,
                                     (void *)&%(x)s->ga.offset,
                                     (void *)%(z)s->ga.data,
                                     (void *)&%(z)s->ga.offset};
            if (verbose) printf("running kernel_reduce_ccontig_%(name)s"
                                " n_threads=%%llu, size=%%llu, ndim=%%u\\n",
                                n_threads, numEls,
                                PyGpuArray_NDIM(%(x)s));
            size_t n_shared = sizeof(%(acc_dtype)s) * n_threads;
            int err = GpuKernel_call(&%(k_var)s, 1, &n_blocks, &n_threads, n_shared, kernel_params);
            %(err_check)s
         }
        }
        """
            % locals(),
            file=sio,
        )

    def c_code_reduce_1(self, sio, node, name, x, z, fail):
        """Emit host C code launching the complete reduction of a 1d input."""
        launch = self._makecall(node, name, x, z, fail)
        subs = {"verbose": self.verbose, "x": x, "makecall": launch}
        code = (
            """
        {
            int verbose = %(verbose)s;
            size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 256), 1, 1};
            size_t n_blocks[3] = {1, 1, 1};
            %(makecall)s
        }
        """
            % subs
        )
        print(code, file=sio)

    def c_code_reduce_11(self, sio, node, name, x, z, fail):
        """Emit host C code launching the complete reduction of a 2d input.

        x threads cover dimension 1; the y thread count is grown until the
        block is full (256 threads), then clamped to the size of dimension 0.
        """
        verbose = self.verbose
        makecall = self._makecall(node, name, x, z, fail)
        print(
            """
        {
            int verbose = %(verbose)s;

            size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[1], (size_t) 256), 1, 1};
            while (n_threads[1] * n_threads[0] <= 256) ++n_threads[1];
            n_threads[1] -= 1;
            if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[0])
                n_threads[1] = PyGpuArray_DIMS(%(x)s)[0];

            size_t n_blocks[3] = {1, 1, 1};
            %(makecall)s
        }
        """
            % locals(),
            file=sio,
        )

    def c_code_reduce_01X(self, sio, node, name, x, z, fail, N):
        """
        Emit host C code configuring the launch geometry for the 01, 011
        and 0111 reduction patterns and calling the kernel.

        Parameters
        ----------
        sio
            Stream the generated C code is printed to.
        node, name, x, z, fail
            Passed through from this op's c_code.
        N
            The number of 1 in the pattern N=1 -> 01, N=2 -> 011 N=3 ->0111
            Work for N=1,2,3.

        """
        # Removed previously-unused locals (in_dtype, out_dtype, N_pattern,
        # param_dim, strides_dim): none of them were referenced by any of
        # the templates below.
        assert N in [1, 2, 3]
        verbose = self.verbose
        makecall = self._makecall(node, name, x, z, fail)

        # Grow the y thread count while the block still has room (256
        # threads max), capped by the size of the relevant axis.
        threads_y = (
            """
            //get as many y threads as we can fit
            while (n_threads[0] * (n_threads[1]+1) <= 256)
            {
                if (n_threads[1] < PyGpuArray_DIMS(%(x)s)[%(N)s-1])
                    n_threads[1] += 1;
                else
                    break;
            }"""
            % locals()
        )

        # Same for the z thread count, additionally clamped to 64 (Fermi
        # hardware limit on that dimension, per the C comment).
        threads_z = (
            """
            //get as many z threads as we can fit
            while (n_threads[0] * n_threads[1] * (n_threads[2]+1) <= 256)
            {
                if (n_threads[2] < PyGpuArray_DIMS(%(x)s)[%(N)s-2])
                    n_threads[2] += 1;
                else
                    break;
            }
            //Maximum for Fermi GPU on that dimensions.
            n_threads[2] = std::min(n_threads[2], (size_t)64);
        """
            % locals()
        )

        # Fewer input dimensions leave no extra axes to spread over the
        # y/z thread dimensions.
        if len(self.reduce_mask) == 2:
            threads_y = ""
            threads_z = ""

        if len(self.reduce_mask) == 3:
            threads_z = ""

        print(
            """
        {
            int verbose = %(verbose)s;
            size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[%(N)s], (size_t) 256), 1, 1};
            %(threads_y)s
            %(threads_z)s
            size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 4096), 1, 1};
            %(makecall)s
        }
        """
            % locals(),
            file=sio,
        )

    def c_code_reduce_01(self, sio, node, name, x, z, fail):
        """Pattern 01 (2d input, reduce axis 1): delegate to the 01X generator."""
        self.c_code_reduce_01X(sio, node, name, x, z, fail, N=1)

    def c_code_reduce_011(self, sio, node, name, x, z, fail):
        """Pattern 011 (3d input, reduce axes 1-2): delegate to the 01X generator."""
        self.c_code_reduce_01X(sio, node, name, x, z, fail, N=2)

    def c_code_reduce_0111(self, sio, node, name, x, z, fail):
        """Pattern 0111 (4d input, reduce axes 1-3): delegate to the 01X generator."""
        self.c_code_reduce_01X(sio, node, name, x, z, fail, N=3)

    def c_code_reduce_10(self, sio, node, name, x, z, fail):
        """Emit host C code for a reduction with mask (1, 0) on a 2d input.

        Two strategies: when dim 0 has the larger stride, the input is
        implicitly treated as 3d and the 010_AD kernel is reused (coalesced
        reads); otherwise the plain 010 kernel is launched.
        """
        verbose = self.verbose
        in_dtype = "npy_" + node.inputs[0].dtype
        out_dtype = "npy_" + node.outputs[0].dtype
        acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype)
        k_var = f"kernel_reduce_10_{name}"
        # Unlike the other err_check templates, k_var here names a
        # `GpuKernel *` local declared in the generated code below, so it is
        # passed without `&`.
        err_check = (
            """
            if (err != GA_NO_ERROR) {
                PyErr_Format(PyExc_RuntimeError,
                             "gpuarray error: %(k_var)s: %%s.",
                             GpuKernel_error(%(k_var)s, err));
                %(fail)s;
            }
        """
            % locals()
        )

        print(
            """
    {
        int verbose = %(verbose)s;
        if(PyGpuArray_STRIDES(%(x)s)[0]>
           PyGpuArray_STRIDES(%(x)s)[1]){
                // If there are a lot of summations to do, then we can use simple parallelization -
                // use each thread to do one sum.

                // we might as well launch blocks of 32 threads because that's the warp size.
                // we could schedule more threads if we were maxing out the gridsize below, but
                // the gridsize is way more than the physical hardware and I think 32 threads
                // on a huge grid is enough to fully use the hardware.
                size_t n_threads[3] = {32, 1, 1};

                // We kindof reshape the input implicitly to something 4D:
                //  the shape A,B,C    ->   A, B, D, E
                //  where C <= D*E < C+32
                //  where E==32

                GpuKernel *%(k_var)s = &kernel_reduce_010_AD_%(name)s;
                size_t A = 1;
                size_t B = PyGpuArray_DIMS(%(x)s)[0];
                size_t C = PyGpuArray_DIMS(%(x)s)[1];
                size_t D = C/32;
                if (32*D < C) D+= 1;
                assert ((C <= 32*D) && (32*D < C+32));

                // The gridsize would ideally be (A, D).  But we do the following logic to make
                // sure we don't ask for a grid that is too big.
                size_t n_blocks[3] = {A, D, 1};
                if (n_blocks[0] > 4096) n_blocks[0] = 4096;
                if (n_blocks[0]*n_blocks[1] > 4096) n_blocks[1] = 4096/n_blocks[0];
                ssize_t stride_A0 = 1;
                ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s);
                ssize_t stride_A2 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s);
                ssize_t stride_Z0 = 1;
                ssize_t stride_Z1 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s);
                void *kernel_params[] = {
                        (void *)&A, (void *)&B, (void *)&C, (void *)&D,
                        (void *)%(x)s->ga.data,
                        (void *)&%(x)s->ga.offset,
                        (void *)&stride_A0, (void *)&stride_A1, (void *)&stride_A2,
                        (void *)%(z)s->ga.data,
                        (void *)&%(z)s->ga.offset,
                        (void *)&stride_Z0, (void *)&stride_Z1};
                int err = GpuKernel_call(%(k_var)s, 3, n_blocks, n_threads, 0, kernel_params);
                %(err_check)s
        }else{
            GpuKernel *%(k_var)s = &kernel_reduce_010_%(name)s;
            size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 256), 1, 1};
            size_t n_blocks[3] = {1, std::min(PyGpuArray_DIMS(%(x)s)[1], (size_t) 4096), 1};
            if (verbose) {
              fprintf(stderr,
                "running kernel_reduce_10_%(name)s n_blocks=(%%llu,%%llu)\\n",
                (unsigned long long)n_blocks[0],
                (unsigned long long)n_blocks[1]);
            }
            assert(PyGpuArray_DIMS(%(x)s)[1] == PyGpuArray_DIMS(%(z)s)[0]);
            size_t n_shared = sizeof(%(acc_dtype)s) * n_threads[0];
            size_t dim_0 = 1;
            ssize_t stride_A0 = 1;
            ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s);
            ssize_t stride_A2 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s);
            ssize_t stride_Z0 = 1;
            ssize_t stride_Z1 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s);
            void *kernel_params[] = {
                    (void *)&dim_0,
                    (void *)&PyGpuArray_DIMS(%(x)s)[0],
                    (void *)&PyGpuArray_DIMS(%(x)s)[1],
                    (void *)%(x)s->ga.data, (void *)&%(x)s->ga.offset,
                    (void *)&stride_A0, (void *)&stride_A1, (void *)&stride_A2,
                    (void *)%(z)s->ga.data, (void *)&%(z)s->ga.offset,
                    (void *)&stride_Z0, (void *)&stride_Z1};
            int err = GpuKernel_call(%(k_var)s, 3, n_blocks, n_threads, n_shared, kernel_params);
            %(err_check)s
        }
    }
        """
            % locals(),
            file=sio,
        )

    def c_code_reduce_010(self, sio, node, name, x, z, fail):
        """Emit host C code for a reduction with mask (0, 1, 0) on a 3d input.

        The generated code currently always takes the 010_AD branch (the
        `if (1)` below); the alternative branch with the 010/010_inner
        kernels is kept but unreachable.
        """
        verbose = self.verbose
        makecall = self._makecall(node, name, x, z, fail)
        makecall_inner = self._makecall(node, name, x, z, fail, pattern="010_inner")
        # NOTE(review): `pattern` is computed but not referenced below.
        pattern = "".join(str(i) for i in self.reduce_mask)
        in_dtype = "npy_" + node.inputs[0].dtype
        out_dtype = "npy_" + node.outputs[0].dtype
        k_var = f"kernel_reduce_010_AD_{name}"
        err_check = (
            """
            if (err != GA_NO_ERROR) {
                PyErr_Format(PyExc_RuntimeError,
                             "gpuarray error: %(k_var)s: %%s.",
                             GpuKernel_error(&%(k_var)s, err));
                %(fail)s;
            }
        """
            % locals()
        )
        print(
            """
        {
            //int n_summations = PyGpuArray_DIMS(%(x)s)[0] * PyGpuArray_DIMS(%(x)s)[2];

            //if ((n_summations >= 15 * 32) && (PyGpuArray_DIMS(%(x)s)[2]>=16))
            if (1) // if the alternative is less buggy, consider not using this branch
            {
                // If there are a lot of summations to do, then we can use simple parallelization -
                // use each thread to do one sum.

                // we might as well launch blocks of 32 threads because that's the warp size.
                // we could schedule more threads if we were maxing out the gridsize below, but
                // the gridsize is way more than the physical hardware and I think 32 threads
                // on a huge grid is enough to fully use the hardware.
                size_t n_threads[3] = {32, 1, 1};

                // We kindof reshape the input implicitly to something 4D:
                //  the shape A,B,C    ->   A, B, D, E
                //  where C <= D*E < C+32
                //  where E==32

                size_t A = PyGpuArray_DIMS(%(x)s)[0];
                size_t B = PyGpuArray_DIMS(%(x)s)[1];
                size_t C = PyGpuArray_DIMS(%(x)s)[2];
                size_t D = C/32;
                if (32*D < C) D+= 1;
                assert ((C <= 32*D) && (32*D < C+32));

                // The gridsize would ideally be (A, D).  But we do the following logic to make
                // sure we don't ask for a grid that is too big.
                size_t n_blocks[3] = {A, D, 1};
                if (n_blocks[0] > 4096) n_blocks[0] = 4096;
                if (n_blocks[0]*n_blocks[1] > 4096) n_blocks[1] = 4096/n_blocks[0];
                ssize_t stride_A0 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s);
                ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s);
                ssize_t stride_A2 = PyGpuArray_STRIDES(%(x)s)[2]/sizeof(%(in_dtype)s);
                ssize_t stride_Z0 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s);
                ssize_t stride_Z1 = PyGpuArray_STRIDES(%(z)s)[1]/sizeof(%(out_dtype)s);
                void *kernel_params[] = {
                        (void *)&A, (void *)&B, (void *)&C, (void *)&D,
                        (void *)%(x)s->ga.data,
                        (void *)&%(x)s->ga.offset,
                        (void *)&stride_A0, (void *)&stride_A1, (void *)&stride_A2,
                        (void *)%(z)s->ga.data,
                        (void *)&%(z)s->ga.offset,
                        (void *)&stride_Z0, (void *)&stride_Z1};
                int err = GpuKernel_call(&%(k_var)s, 3, n_blocks, n_threads, 0, kernel_params);
                %(err_check)s
            }
            else
            {
                int verbose = %(verbose)s;

                  size_t n_threads[3] = {std::min((size_t) 32, PyGpuArray_DIMS(%(x)s)[2]), 1, 1};
                  while(    (n_threads[0]*(n_threads[1]+1)<=256)
                         && (n_threads[1]<PyGpuArray_DIMS(%(x)s)[1])){
                      n_threads[1]++;
                  }

                  size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t)4096), 1, 1};
                  n_blocks[1] = std::min(
                      ceil_intdiv(PyGpuArray_DIMS(%(x)s)[2],
                                  (size_t)n_threads[0]),
                      (size_t)(4096 / n_blocks[0])
                      );
                if(std::min(std::min(PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s),
                                     PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s)),
                            PyGpuArray_STRIDES(%(x)s)[2]/sizeof(%(in_dtype)s))
                   ==PyGpuArray_STRIDES(%(x)s)[2]/sizeof(%(in_dtype)s)
                  && n_blocks[1]==ceil_intdiv(PyGpuArray_DIMS(%(x)s)[2],
                                             (size_t)n_threads[0])){
                  if(verbose>1)
                    printf("n_block.x.1=%%d, n_block.x.2=%%d, n_block.y.1=%%d, n_block.y.2=%%d,\\n",
                           PyGpuArray_DIMS(%(x)s)[0],4096,
                           ceil_intdiv(PyGpuArray_DIMS(%(x)s)[2],(size_t)n_threads[0]),
                                       (size_t)(4096 / n_blocks[0]));
                  assert(n_threads[0]<=32);
                  %(makecall_inner)s
                }else{
                  n_threads[0] = std::min(PyGpuArray_DIMS(%(x)s)[1],
                                         (size_t) 256);
                  n_blocks[0] = std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t)4096);
                  n_blocks[1] = std::min(
                      PyGpuArray_DIMS(%(x)s)[2],
                      (size_t)(4096 / n_blocks[0])
                      );
                  %(makecall)s
                }
            }
        }
        """
            % locals(),
            file=sio,
        )

    def c_code_reduce_0101(self, sio, node, name, x, z, fail):
        """Emit host C code for a reduction with mask (0, 1, 0, 1) on a 4d input."""
        launch = self._makecall(node, name, x, z, fail)
        subs = {"verbose": self.verbose, "x": x, "makecall": launch}
        code = (
            """
        {
            int verbose = %(verbose)s;
            size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[3], (size_t) 256), 1, 1};
            while (n_threads[0] * n_threads[1] <= 256)
            {
                if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[1]) break;
                n_threads[1] += 1;
            }
            n_threads[1] -= 1;
            size_t n_blocks[3] = {PyGpuArray_DIMS(%(x)s)[0], PyGpuArray_DIMS(%(x)s)[2], 1};
            %(makecall)s
        }
        """
            % subs
        )
        print(code, file=sio)

    def c_code_reduce_100(self, sio, node, name, x, z, fail):
        """Emit host C code for a reduction with mask (1, 0, 0) on a 3d input.

        When the innermost axis is contiguous, the 010_AD kernel is reused
        with the first two dimensions transposed to get coalesced reads.
        """
        verbose = self.verbose
        makecall = self._makecall(node, name, x, z, fail)
        in_dtype = "npy_" + node.inputs[0].dtype
        out_dtype = "npy_" + node.outputs[0].dtype
        # NOTE(review): acc_dtype is computed but not referenced by the
        # template below.
        acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype)
        k_var = f"kernel_reduce_010_AD_{name}"
        err_check = (
            """
            if (err != GA_NO_ERROR) {
                PyErr_Format(PyExc_RuntimeError,
                             "gpuarray error: %(k_var)s: %%s.",
                             GpuKernel_error(&%(k_var)s, err));
                %(fail)s;
            }
        """
            % locals()
        )
        # use threadIdx.x for i0
        # use blockIdx.x for i1
        # use blockIdx.y for i2
        print(
            """
        {
            int verbose = %(verbose)s;
            if (PyGpuArray_STRIDES(%(x)s)[2] != sizeof(%(in_dtype)s)){
                size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 256), 1, 1};
                size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[1], (size_t)4096), 1, 1};
                while (n_blocks[0] * (n_blocks[1]+1) <= 4096 &&
                       n_blocks[1] <= PyGpuArray_DIMS(%(x)s)[2])
                {
                    n_blocks[1] += 1;
                }
                %(makecall)s
            }
            else
            {   // reuse 010_AD kernel, we transpose the 2 first dim
                // See the reduction for the real 010_AD kernel for
                // explanation. We do this to get coalesced read.
                size_t n_threads[3] = {32, 1, 1};

                size_t A = PyGpuArray_DIMS(%(x)s)[1];
                size_t B = PyGpuArray_DIMS(%(x)s)[0];
                size_t C = PyGpuArray_DIMS(%(x)s)[2];
                size_t D = C/32;
                if (32*D < C) D+= 1;
                assert ((C <= 32*D) && (32*D < C+32));

                // The gridsize would ideally be (A, D).  But we do the following logic to make
                // sure we don't ask for a grid that is too big.
                size_t n_blocks[3] = {A, D, 1};
                if (n_blocks[0] > 4096) n_blocks[0] = 4096;
                if (n_blocks[0]*n_blocks[1] > 4096) n_blocks[1] = 4096/n_blocks[0];
                size_t n_shared = 0;
                ssize_t stride_A0 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s);
                ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s);
                ssize_t stride_A2 = PyGpuArray_STRIDES(%(x)s)[2]/sizeof(%(in_dtype)s);
                ssize_t stride_Z0 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s);
                ssize_t stride_Z1 = PyGpuArray_STRIDES(%(z)s)[1]/sizeof(%(out_dtype)s);
                void *kernel_params[] = {
                        (void *)&A, (void *)&B, (void *)&C, (void *)&D,
                        (void *)%(x)s->ga.data,
                        (void *)&%(x)s->ga.offset,
                        (void *)&stride_A0, (void *)&stride_A1, (void *)&stride_A2,
                        (void *)%(z)s->ga.data,
                        (void *)&%(z)s->ga.offset,
                        (void *)&stride_Z0, (void *)&stride_Z1};
                int err = GpuKernel_call(&%(k_var)s, 3, n_blocks, n_threads, 0, kernel_params);
                %(err_check)s
            }
        }
        """
            % locals(),
            file=sio,
        )

    def c_code_reduce_110(self, sio, node, name, x, z, fail):
        """Emit host C code for a reduction with mask (1, 1, 0) on a 3d input."""
        launch = self._makecall(node, name, x, z, fail)
        subs = {"verbose": self.verbose, "x": x, "makecall": launch}
        code = (
            """
        {
            int verbose = %(verbose)s;
            size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[1], (size_t) 256), 1, 1};
            while (n_threads[0]*n_threads[1] <= 256)
            {
                if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[0])
                    break;
                n_threads[1] += 1;
            }
            n_threads[1] -= 1;

            size_t n_blocks[3] = {PyGpuArray_DIMS(%(x)s)[2], 1, 1};
            %(makecall)s
        }
        """
            % subs
        )
        print(code, file=sio)

    def c_code_reduce_001(self, sio, node, name, x, z, fail):
        """Emit host C code for a reduction with mask (0, 0, 1) on a 3d input."""
        launch = self._makecall(node, name, x, z, fail)
        subs = {"verbose": self.verbose, "x": x, "makecall": launch}
        code = (
            """
        {
            int verbose = %(verbose)s;
            size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[2], (size_t) 256), 1, 1};
            size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 4096), 1, 1};
            while (n_blocks[0] * n_blocks[1] <= 4096)
            {
                if (n_blocks[1] > PyGpuArray_DIMS(%(x)s)[1])
                    break;
                n_blocks[1] += 1;
            }
            n_blocks[1] -= 1;
            %(makecall)s
        }
        """
            % subs
        )
        print(code, file=sio)

    def c_code_reduce_101(self, sio, node, name, x, z, fail):
        """Emit host C code for a reduction with mask (1, 0, 1) on a 3d input.

        Reuses the 1011 kernel by inserting a dummy size-1 dimension
        (`extra_dims`/`extra_strides` below), so the input is treated as 4d.
        """
        verbose = self.verbose
        makecall = self._makecall(
            node,
            name,
            x,
            z,
            fail,
            extra_dims=[("size_t one = 1;", "(void *) &one")],
            extra_strides=[("ssize_t sone = 1;", "(void *) &sone")],
            pattern="1011",
        )
        print(
            """
        {
            int verbose = %(verbose)s;
//            size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[3],
//                                            (size_t) 256), 1, 1};
            size_t n_threads[3] = {1, 1, 1};

            while (n_threads[0] * (n_threads[1]+1) <= 256) ++n_threads[1];
            if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[2])
                n_threads[1] = PyGpuArray_DIMS(%(x)s)[2];

            while (n_threads[0] * n_threads[1] * (n_threads[2]+1) <= 256)
                ++n_threads[2];
            if (n_threads[2] > 64)
                n_threads[2] = 64;
            if (n_threads[2] > PyGpuArray_DIMS(%(x)s)[0])
                n_threads[2] = PyGpuArray_DIMS(%(x)s)[0];

            size_t n_blocks[3] = {PyGpuArray_DIMS(%(x)s)[1], 1, 1};
            %(makecall)s
        }
        """
            % locals(),
            file=sio,
        )

    def c_code_reduce_111(self, sio, node, name, x, z, fail):
        """Write to ``sio`` the C host code that sets up the launch
        dimensions and invokes the kernel for the (1, 1, 1) reduce pattern."""
        call = self._makecall(node, name, x, z, fail)
        subs = {
            "verbose": self.verbose,
            "x": x,
            "makecall": call,
        }
        template = """
        {
            int verbose = %(verbose)s;
            size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[2], (size_t) 256), 1, 1};

            //get as many y threads as we can fit
            while (n_threads[0] * n_threads[1] <= 256)
            {
                if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[1])
                    break;
                n_threads[1] += 1;
            }
            n_threads[1] -= 1;

            //get as many z threads as we can fit
            while (n_threads[0] * n_threads[1] * n_threads[2] <= 256)
            {
                if (n_threads[2] > PyGpuArray_DIMS(%(x)s)[0])
                    break;
                n_threads[2] += 1;
            }
            n_threads[2] -= 1;
            //Maximum for Fermi GPU on that dimensions.
            n_threads[2] = std::min(n_threads[2], (size_t)64);

            size_t n_blocks[3] = {1, 1, 1};
            %(makecall)s
        }
        """
        sio.write(template % subs + "\n")

    def c_code_reduce_0011(self, sio, node, name, x, z, fail):
        """Write to ``sio`` the C host code that sets up the launch
        dimensions and invokes the kernel for the (0, 0, 1, 1) reduce
        pattern.

        Parameters follow the other ``c_code_reduce_*`` methods: ``sio`` is
        the output stream, ``node`` the Apply node, ``name`` the node name,
        ``x``/``z`` the input/output C variable names, and ``fail`` the
        failure code snippet.
        """
        verbose = self.verbose
        makecall = self._makecall(node, name, x, z, fail)
        # Only the accumulator dtype is substituted into the template below:
        # its size bounds the per-block thread count via the shared-memory
        # budget check.  (The input/output dtypes are not needed here.)
        acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype)
        print(
            """
        {
            int verbose = %(verbose)s;

            size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 4096), 1, 1};

            while (n_blocks[0] * n_blocks[1] <= 4096 &&
                   n_blocks[1] < PyGpuArray_DIMS(%(x)s)[1])
            {
                n_blocks[1] += 1;
            }

            size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[3], (size_t) 256), 1, 1};
            while (n_threads[0] * n_threads[1] <= 256
                   && n_threads[1] < PyGpuArray_DIMS(%(x)s)[2]
                   && n_threads[0] * n_threads[1] * sizeof(%(acc_dtype)s) <=(15*1024-200))
            {
                n_threads[1] += 1;
            }

            %(makecall)s
        }
        """
            % locals(),
            file=sio,
        )

    def c_code_reduce_1111(self, sio, node, name, x, z, fail):
        """Write to ``sio`` the C host code that sets up the launch
        dimensions and invokes the kernel for the (1, 1, 1, 1) reduce pattern."""
        call = self._makecall(node, name, x, z, fail)
        subs = {
            "verbose": self.verbose,
            "x": x,
            "makecall": call,
        }
        template = """
        {
            int verbose = %(verbose)s;
            size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[2], (size_t) 256), 1, 1};

            //get as many y threads as we can fit
            while (n_threads[0] * n_threads[1] <= 256)
            {
                if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[1])
                    break;
                n_threads[1] += 1;
            }
            n_threads[1] -= 1;

            //get as many z threads as we can fit
            while (n_threads[0] * n_threads[1] * n_threads[2] <= 256)
            {
                if (n_threads[2] > PyGpuArray_DIMS(%(x)s)[0])
                    break;
                n_threads[2] += 1;
            }
            n_threads[2] -= 1;

            //Maximum for Fermi GPU on that dimensions.
            n_threads[2] = std::min(n_threads[2], (size_t)64);

            size_t n_blocks[3] = {1, 1, 1};
            %(makecall)s
        }
        """
        sio.write(template % subs + "\n")

    def c_code_reduce_1011(self, sio, node, name, x, z, fail):
        """Write to ``sio`` the C host code that sets up the launch
        dimensions and invokes the kernel for the (1, 0, 1, 1) reduce pattern."""
        call = self._makecall(node, name, x, z, fail)
        subs = {
            "verbose": self.verbose,
            "x": x,
            "makecall": call,
        }
        template = """
        {
            int verbose = %(verbose)s;
            size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[3], (size_t) 256), 1, 1};

            while (n_threads[0] * (n_threads[1]+1) <= 256) ++n_threads[1];
            if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[2])
                n_threads[1] = PyGpuArray_DIMS(%(x)s)[2];

            while (n_threads[0] * n_threads[1] * (n_threads[2]+1) <= 256) ++n_threads[2];
            if (n_threads[2] > 64)
                n_threads[2] = 64;
            if (n_threads[2] > PyGpuArray_DIMS(%(x)s)[0])
                n_threads[2] = PyGpuArray_DIMS(%(x)s)[0];

            size_t n_blocks[3] = {PyGpuArray_DIMS(%(x)s)[1], 1, 1};
            %(makecall)s
        }
        """
        sio.write(template % subs + "\n")

    def c_code_cache_version_apply(self, node):
        """Return the cache-version tuple for the C code generated for
        ``node``, or ``()`` to disable caching when any component version
        is falsy."""
        # Version of the C code emitted by this Op itself.
        version = [24, self.verbose]

        # Fold in the versions of everything the generated code depends on:
        # the scalar op applied to scalar stand-ins for our inputs/outputs...
        scalar_ins = [Scalar(dtype=inp.type.dtype)() for inp in node.inputs]
        scalar_outs = [Scalar(dtype=out.type.dtype)() for out in node.outputs]
        scalar_node = Apply(self.scalar_op, scalar_ins, scalar_outs)
        version.extend(self.scalar_op.c_code_cache_version_apply(scalar_node))

        # ...the scalar types of every input and output...
        for var in node.inputs + node.outputs:
            version.extend(Scalar(dtype=var.type.dtype).c_code_cache_version())

        # ...and the GPU kernels themselves.
        version.extend(self.kernel_version(node))

        # A zero/empty component anywhere means "do not cache".
        return tuple(version) if all(version) else ()

    def gpu_kernels(self, node, nodename):
        nd_in = len(self.reduce_mask)
        in_dtype = node.inputs[0].dtype
        out_dtype = node.outputs[0].dtype
        acc_dtype = self._acc_dtype(node.inputs[0].dtype)
        assign_dtype = in_dtype
        flags = Kernel.get_flags(in_dtype, acc_dtype, out_dtype)
        in_type = gpuarray.dtype_to_ctype(in_dtype)
        out_type = gpuarray.dtype_to_ctype(out_dtype)
        acc_type = gpuarray.dtype_to_ctype(acc_dtype)
        load_in = load_w(in_dtype)
        write_out = write_w(out_dtype)
        kernels = []

        if all(i == 1 for i in self.reduce_mask):
            # this kernel is ok for up to a few thousand elements, but
            # it only runs on ONE multiprocessor
            reducebuf = self._k_reduce_buf("Z[0]", node, nodename, sub={})
            reduce_fct = self._assign_reduce(
                node, nodename, "myresult", load_in + "(A[i0])", {}, True
            )
            reduce_init = self._assign_init(load_in + "(A[0])", assign_dtype)
            kname = "kernel_reduce_ccontig"
            k_var = "kernel_reduce_ccontig_" + nodename
            sio = StringIO()
            print(
                """#include "cluda.h"

            KERNEL void %(kname)s(
                    const ga_size d0,
                    const %(in_type)s *A, const ga_size offset_A,
                    %(out_type)s *Z, const ga_size offset_Z)
            {
                const int threadCount = blockDim.x;
                const int threadNum = threadIdx.x;
                extern __shared__ %(acc_type)s buf[];
                A = (const %(in_type)s *)(((char *)A)+offset_A);
                Z = (%(out_type)s *)(((char *)Z)+offset_Z);
                %(acc_type)s myresult = %(reduce_init)s;

                for (int i0 = threadIdx.x; i0 < d0; i0 += blockDim.x)
                {
                    %(reduce_fct)s
                }
                %(reducebuf)s
            }
            """
                % locals(),
                file=sio,
            )
            params = ["uintp", gpuarray.GpuArray, "uintp", gpuarray.GpuArray, "uintp"]
            kernels.append(
                Kernel(
                    code=sio.getvalue(),
                    name=kname,
                    params=params,
                    flags=flags,
                    objvar=k_var,
                )
            )
        if self.reduce_mask == (1,):
            # this kernel is ok for up to a few thousand elements, but
            # it only runs on ONE multiprocessor
            reducebuf = self._k_reduce_buf("Z[0]", node, nodename, sub={})
            reduce_fct = self._assign_reduce(
                node, nodename, "myresult", load_in + "(A[i0 * sA0])", {}, True
            )
            reduce_init = self._assign_init(load_in + "(A[0])", assign_dtype)
            kname = "kernel_reduce_1"
            k_var = "kernel_reduce_1_" + nodename
            sio = StringIO()
            print(
                """#include "cluda.h"

            KERNEL void %(kname)s(
                    const ga_size d0,
                    const %(in_type)s *A, const ga_size offset_A,
                    const ga_ssize sA0,
                    %(out_type)s * Z, const ga_size offset_Z)
            {
                const int threadCount = blockDim.x;
                const int threadNum = threadIdx.x;
                extern __shared__ %(acc_type)s buf[];
                A = (const %(in_type)s *)(((char *)A)+offset_A);
                Z = (%(out_type)s *)(((char *)Z)+offset_Z);
                %(acc_type)s myresult = %(reduce_init)s;

                for (int i0 = threadIdx.x; i0 < d0; i0 += blockDim.x)
                {
                    %(reduce_fct)s
                }
                %(reducebuf)s
            }
            """
                % locals(),
                file=sio,
            )
            params = [
                "uintp",
                gpuarray.GpuArray,
                "uintp",
                "intp",
                gpuarray.GpuArray,
                "uintp",
            ]
            kernels.append(
                Kernel(
                    code=sio.getvalue(),
                    name=kname,
                    params=params,
                    flags=flags,
                    objvar=k_var,
                )
            )
        if self.reduce_mask == (1, 1):
            # this kernel is ok for up to a few thousand elements, but
            # it only runs on ONE multiprocessor
            reducebuf = self._k_reduce_buf("Z[0]", node, nodename, sub={})
            reduce_fct = self._assign_reduce(
                node,
                nodename,
                "myresult",
                load_in + "(A[i0 * sA0 + i1 * sA1])",
                {},
                True,
            )
            reduce_init = self._assign_init(load_in + "(A[0])", assign_dtype)
            kname = "kernel_reduce_11"
            k_var = "kernel_reduce_11_" + nodename
            sio = StringIO()
            print(
                """#include "cluda.h"

            KERNEL void %(kname)s(
                    const ga_size d0, const ga_size d1,
                    const %(in_type)s *A, const ga_size offset_A,
                    const ga_ssize sA0, const ga_ssize sA1,
                    %(out_type)s * Z, const ga_size offset_Z)
            {
                const int threadCount = blockDim.x * blockDim.y;
                const int threadNum = threadIdx.y*blockDim.x + threadIdx.x;
                extern __shared__ %(acc_type)s buf[];
                A = (const %(in_type)s *)(((char *)A)+offset_A);
                Z = (%(out_type)s *)(((char *)Z)+offset_Z);
                %(acc_type)s myresult = %(reduce_init)s;

                for (int i0 = threadIdx.y; i0 < d0; i0 += blockDim.y)
                {
                    for (int i1 = threadIdx.x; i1 < d1; i1 += blockDim.x)
                    {
                        %(reduce_fct)s;
                    }
                }
                %(reducebuf)s
            }
            """
                % locals(),
                file=sio,
            )
            params = [
                "uintp",
                "uintp",
                gpuarray.GpuArray,
                "uintp",
                "intp",
                "intp",
                gpuarray.GpuArray,
                "uintp",
            ]
            kernels.append(
                Kernel(
                    code=sio.getvalue(),
                    name=kname,
                    params=params,
                    flags=flags,
                    objvar=k_var,
                )
            )
        # 01, 011, 0111
        if (
            0 == self.reduce_mask[0]
            and all(self.reduce_mask[1:])
            and nd_in in [2, 3, 4]
        ):
            # this kernel uses one block for each row.
            # threads per block for each element per row.

            N_pattern = "".join(["1"] * (nd_in - 1))
            # TODO: is it faster to hardcode sA3, etc. in the later
            # code, rather than have the for_* variables declare them
            # and the later code use their names?
            if nd_in == 2:
                for_i1 = "for (int i1 = threadIdx.x; i1 < d1; i1 += blockDim.x)"
                first_i1 = "threadIdx.x"
                sA1 = "sA1"
                for_i2 = "int i2=0, sA2=0;"
                sA2 = "0"
                first_i2 = "0"
                for_i3 = "int i3=0, sA3=0;"
                sA3 = "0"
                first_i3 = "0"
            if nd_in == 3:
                for_i1 = "for (int i1 = threadIdx.y; i1 < d1; i1 += blockDim.y)"
                first_i1 = "threadIdx.y"
                sA1 = "sA1"
                for_i2 = "for (int i2 = threadIdx.x; i2 < d2; i2 += blockDim.x)"
                first_i2 = "threadIdx.x"
                sA2 = "sA2"
                for_i3 = "int i3=0, sA3=0;"
                first_i3 = 0
                sA3 = "0"
            if nd_in == 4:
                for_i1 = "for (int i1 = threadIdx.z; i1 < d1; i1 += blockDim.z)"
                first_i1 = "threadIdx.z"
                sA1 = "sA1"
                for_i2 = "for (int i2 = threadIdx.y; i2 < d2; i2 += blockDim.y)"
                first_i2 = "threadIdx.y"
                sA2 = "sA2"
                for_i3 = "for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x)"
                first_i3 = "threadIdx.x"
                sA3 = "sA3"

            reducebuf = self._k_reduce_buf("Z[i0 * sZ0]", node, nodename, sub={})
            param_dim = ",".join([f"const ga_size d{i}" for i in range(nd_in)])
            param_strides = ",".join([f"const ga_ssize sA{i}" for i in range(nd_in)])
            decl, kname, params, k_var = self._k_decl(node, nodename)
            init = self._k_init(node, nodename)
            reduce_init = self._assign_init(
                load_in
                + "(A[%(first_i3)s * %(sA3)s + %(first_i2)s * %(sA2)s + %(first_i1)s * %(sA1)s + i0 * sA0])"
                % locals(),
                assign_dtype,
            )
            reduce_fct = self._assign_reduce(
                node,
                nodename,
                "myresult",
                load_in + "(A[i3 * sA3 + i2 * sA2 + i1 * sA1 + i0 * sA0])",
                {},
                True,
            )
            sio = StringIO()
            print(
                """#include "cluda.h"

                %(decl)s{
                    %(init)s
                    for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x){
                      myresult = %(reduce_init)s;
                      %(for_i1)s{
                        %(for_i2)s{
                          %(for_i3)s{
                            %(reduce_fct)s;
                          }
                        }
                      }
                      %(reducebuf)s
                    }
                }
                """
                % locals(),
                file=sio,
            )
            kernels.append(
                Kernel(
                    code=sio.getvalue(),
                    name=kname,
                    params=params,
                    flags=flags,
                    objvar=k_var,
                )
            )
        if self.reduce_mask == (0, 1, 0) or self.reduce_mask == (1, 0):
            # this kernel uses one block for each column,
            # threads per block for each element per column.

            # TODO: This kernel is pretty inefficient in terms of reading, because if A is
            #      c_contiguous (typical case) then each warp is accessing non-contigous
            #      memory (a segment of a column).
            reducebuf = self._k_reduce_buf(
                "Z[i0 * sZ0 + i2*sZ1]", node, nodename, sub={}
            )
            reduce_fct = self._assign_reduce(
                node,
                nodename,
                "myresult",
                load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2])",
                {},
                True,
            )
            reduce_init = self._assign_init(
                load_in + "(A[i0 * sA0 + threadIdx.x * sA1 + i2 * sA2])", assign_dtype
            )
            kname = "kernel_reduce_010"
            k_var = "kernel_reduce_010_" + nodename
            sio = StringIO()
            print(
                """#include "cluda.h"

            KERNEL void %(kname)s(
                    const ga_size d0, const ga_size d1, const ga_size d2,
                    const %(in_type)s *A, const ga_size offset_A,
                    const ga_ssize sA0, const ga_ssize sA1, const ga_ssize sA2,
                    %(out_type)s * Z, const ga_size offset_Z,
                    const ga_ssize sZ0, const ga_ssize sZ1)
            {
                const int threadCount = blockDim.x;
                const int threadNum = threadIdx.x;
                extern __shared__ %(acc_type)s buf[];
                A = (const %(in_type)s *)(((char *)A)+offset_A);
                Z = (%(out_type)s *)(((char *)Z)+offset_Z);

                for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x)
                {
                    for (int i2 = blockIdx.y; i2 < d2; i2 += gridDim.y)
                    {
                        %(acc_type)s myresult = %(reduce_init)s;
                        for (int i1 = threadIdx.x; i1 < d1; i1 += blockDim.x)
                        {
                            %(reduce_fct)s;
                        }
                        %(reducebuf)s
                    }
                }

            }
            """
                % locals(),
                file=sio,
            )
            params = [
                "uintp",
                "uintp",
                "uintp",
                gpuarray.GpuArray,
                "uintp",
                "intp",
                "intp",
                "intp",
                gpuarray.GpuArray,
                "uintp",
                "intp",
                "intp",
            ]
            kernels.append(
                Kernel(
                    code=sio.getvalue(),
                    name=kname,
                    params=params,
                    flags=flags,
                    objvar=k_var,
                )
            )
        if self.reduce_mask in [(0, 1, 0), (1, 0), (1, 0, 0)]:
            reduce_fct = self._assign_reduce(
                node,
                nodename,
                "myresult",
                load_in + "(X[a * sX0 + b * sX1 + c * sX2])",
                {},
                True,
            )
            reduce_init = self._assign_init(
                load_in + "(X[a * sX0 + 0 * sX1 + c * sX2])", assign_dtype
            )
            kname = "kernel_reduce_010_AD"
            k_var = "kernel_reduce_010_AD_" + nodename
            sio = StringIO()
            print(
                """#include "cluda.h"

            KERNEL void %(kname)s(
                    const ga_size A, const ga_size B, const ga_size C, const ga_size D,
                    const %(in_type)s *X, const ga_size offset_X,
                    const ga_ssize sX0, const ga_ssize sX1, const ga_ssize sX2,
                    %(out_type)s * Z, const ga_size offset_Z,
                    const ga_ssize sZ0, const ga_ssize sZ1)
            {
                const int threadCount = blockDim.x;
                const int threadNum = threadIdx.x;
                X = (const %(in_type)s *)(((char *)X)+offset_X);
                Z = (%(out_type)s *)(((char *)Z)+offset_Z);
                %(acc_type)s myresult = 0;

                for (int a = blockIdx.x; a < A; a += gridDim.x)
                {
                    for (int i2_D = blockIdx.y; i2_D < D; i2_D += gridDim.y)
                    {
                        int c = i2_D * 32 + threadIdx.x;
                        if (c < C)
                        {
                            myresult = %(reduce_init)s;
                            for (int b = 0; b < B; ++b)
                            {
                                %(reduce_fct)s;
                            }
                            Z[a * sZ0 + c * sZ1] = %(write_out)s(myresult);
                        }
                    }
                }

            }
            """
                % locals(),
                file=sio,
            )
            params = [
                "uintp",
                "uintp",
                "uintp",
                "uintp",
                gpuarray.GpuArray,
                "uintp",
                "intp",
                "intp",
                "intp",
                gpuarray.GpuArray,
                "uintp",
                "intp",
                "intp",
            ]
            kernels.append(
                Kernel(
                    code=sio.getvalue(),
                    name=kname,
                    params=params,
                    flags=flags,
                    objvar=k_var,
                )
            )
        if self.reduce_mask == (0, 1, 0):
            #
            # This kernel is optimized when the inner most dimensions
            # have the smallest stride.

            # this kernel uses one block for multiple column(up to 32TODO),
            # threads per block for each element per column.

            # thread.x = dim 2 contiguous
            # thread.y = dim 1
            # block.x = dim 0
            # block.y = dim 1 rest
            init = self._k_init(node, nodename)
            decl, kname, params, k_var = self._k_decl(
                node, nodename, pattern="010_inner"
            )
            reducebuf = self._k_reduce_buf_multiple(
                "Z[i0 * sZ0 + i2*sZ1]", node, nodename, "blockDim.x"
            )
            reduce_fct = self._assign_reduce(
                node,
                nodename,
                "myresult",
                load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2])",
                {},
                True,
            )
            reduce_init = self._assign_init(
                load_in + "(A[i0 * sA0 + 0 * sA1 + i2 * sA2])", assign_dtype
            )
            sio = StringIO()
            print(
                """#include "cluda.h"

            %(decl)s
            {
              %(init)s
              for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x)
              {
                for (int i2 = blockIdx.y*blockDim.x+threadIdx.x; i2 < d2; i2 += gridDim.y*blockDim.x)
                 {
                  myresult = %(reduce_init)s;
                  for (int i1 = threadIdx.y; i1 < d1; i1 += blockDim.y)
                  {
                      %(reduce_fct)s;
                  }
                  %(reducebuf)s
                 }
              }
            }
            """
                % locals(),
                file=sio,
            )
            kernels.append(
                Kernel(
                    code=sio.getvalue(),
                    name=kname,
                    params=params,
                    flags=flags,
                    objvar=k_var,
                )
            )
        if self.reduce_mask == (1, 1, 0):
            # this kernel uses one block for each column,
            # threads per block for each element per column.

            # TODO: This kernel is pretty inefficient in terms of reading, because if A is
            #      c_contiguous (typical case) then each warp is accessing non-contigous
            #      memory (a segment of a column).
            reducebuf = self._k_reduce_buf(
                "Z[blockIdx.x * sZ0]", node, nodename, sub={}
            )
            reduce_fct = self._assign_reduce(
                node,
                nodename,
                "myresult",
                load_in + "(A[i0 * sA0 + i1 * sA1 + blockIdx.x * sA2])",
                {},
                True,
            )
            reduce_init = self._assign_init(
                load_in + "(A[blockIdx.x * sA2])", assign_dtype
            )
            kname = "kernel_reduce_110"
            k_var = "kernel_reduce_110_" + nodename
            sio = StringIO()
            print(
                """#include "cluda.h"

            KERNEL void %(kname)s(
                    const ga_size d0, const ga_size d1, const ga_size d2,
                    const %(in_type)s *A, const ga_size offset_A,
                    const ga_ssize sA0, const ga_ssize sA1, const ga_ssize sA2,
                    %(out_type)s * Z, const ga_size offset_Z,
                    const ga_ssize sZ0)
            {
                const int threadCount = blockDim.x * blockDim.y;
                const int threadNum = threadIdx.y * blockDim.x + threadIdx.x;
                extern __shared__ %(acc_type)s buf[];
                A = (const %(in_type)s *)(((char *)A)+offset_A);
                Z = (%(out_type)s *)(((char *)Z)+offset_Z);
                %(acc_type)s myresult = %(reduce_init)s;

                for (int i0 = threadIdx.y; i0 < d0; i0 += blockDim.y)
                {
                    for (int i1 = threadIdx.x; i1 < d1; i1 += blockDim.x)
                    {
                        %(reduce_fct)s;
                    }
                }

                %(reducebuf)s
            }
            """
                % locals(),
                file=sio,
            )
            params = [
                "uintp",
                "uintp",
                "uintp",
                gpuarray.GpuArray,
                "uintp",
                "intp",
                "intp",
                "intp",
                gpuarray.GpuArray,
                "uintp",
                "intp",
            ]
            kernels.append(
                Kernel(
                    code=sio.getvalue(),
                    name=kname,
                    params=params,
                    flags=flags,
                    objvar=k_var,
                )
            )
        if self.reduce_mask == (1, 0, 0):
            reducebuf = self._k_reduce_buf(
                "Z[i1 * sZ0 + i2 * sZ1]", node, nodename, sub={}
            )
            decl, kname, params, k_var = self._k_decl(node, nodename)
            init = self._k_init(node, nodename)
            reduce_fct = self._assign_reduce(
                node,
                nodename,
                "myresult",
                load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2])",
                {},
                True,
            )
            reduce_init = self._assign_init(
                load_in + "(A[i1 * sA1 + i2 * sA2])", assign_dtype
            )
            sio = StringIO()
            print(
                """#include "cluda.h"

            %(decl)s
            {
                %(init)s
                for (int i2 = blockIdx.y; i2 < d2; i2 += gridDim.y)
                {
                    for (int i1 = blockIdx.x; i1 < d1; i1 += gridDim.x)
                    {
                        myresult = %(reduce_init)s;
                        for (int i0 = threadIdx.x; i0 < d0; i0 += blockDim.x)
                        {
                            %(reduce_fct)s
                        }
                        %(reducebuf)s
                    }
                }
            }
            """
                % locals(),
                file=sio,
            )
            kernels.append(
                Kernel(
                    code=sio.getvalue(),
                    name=kname,
                    params=params,
                    flags=flags,
                    objvar=k_var,
                )
            )
        if self.reduce_mask == (1, 1, 1):
            reducebuf = self._k_reduce_buf("Z[0]", node, nodename, sub={})
            decl, kname, params, k_var = self._k_decl(node, nodename)
            init = self._k_init(node, nodename)
            reduce_fct = self._assign_reduce(
                node,
                nodename,
                "myresult",
                load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2])",
                {},
                True,
            )
            reduce_init = self._assign_init(load_in + "(A[0])", assign_dtype)
            sio = StringIO()
            print(
                """#include "cluda.h"

            %(decl)s
            {
                %(init)s
                myresult = %(reduce_init)s;
                for (int i0 = threadIdx.z; i0 < d0; i0 += blockDim.z)
                {
                    for (int i1 = threadIdx.y; i1 < d1; i1 += blockDim.y)
                    {
                        for (int i2 = threadIdx.x; i2 < d2; i2 += blockDim.x)
                        {
                            %(reduce_fct)s;
                        }
                    }
                }
                %(reducebuf)s
            }
            """
                % locals(),
                file=sio,
            )
            kernels.append(
                Kernel(
                    code=sio.getvalue(),
                    name=kname,
                    params=params,
                    flags=flags,
                    objvar=k_var,
                )
            )
        if self.reduce_mask == (0, 0, 1):
            # this kernel uses one block for each row,
            # threads per block for each element per row.
            reducebuf = self._k_reduce_buf(
                "Z[i0 * sZ0 + i1 * sZ1]", node, nodename, sub={}
            )
            reduce_fct = self._assign_reduce(
                node,
                nodename,
                "myresult",
                load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2])",
                {},
                True,
            )
            reduce_init = self._assign_init(
                load_in + "(A[i0 * sA0 + i1 * sA1])", assign_dtype
            )
            kname = "kernel_reduce_001"
            k_var = "kernel_reduce_001_" + nodename
            sio = StringIO()
            print(
                """#include "cluda.h"
            KERNEL void %(kname)s(
                    const ga_size d0, const ga_size d1, const ga_size d2,
                    const %(in_type)s *A, const ga_size offset_A,
                    const ga_ssize sA0, const ga_ssize sA1, const ga_ssize sA2,
                    %(out_type)s * Z, const ga_size offset_Z,
                    const ga_ssize sZ0, const ga_ssize sZ1)
            {
                const int threadCount = blockDim.x;
                const int threadNum = threadIdx.x;
                extern __shared__ %(acc_type)s buf[];
                A = (const %(in_type)s *)(((char *)A)+offset_A);
                Z = (%(out_type)s *)(((char *)Z)+offset_Z);

                for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x)
                {
                    for (int i1 = blockIdx.y; i1 < d1; i1 += gridDim.y)
                    {
                        %(acc_type)s myresult = %(reduce_init)s;
                        for (int i2 = threadIdx.x; i2 < d2; i2 += blockDim.x)
                        {
                            %(reduce_fct)s;
                        }
                        %(reducebuf)s
                    }
                }
            }
            """
                % locals(),
                file=sio,
            )
            params = [
                "uintp",
                "uintp",
                "uintp",
                gpuarray.GpuArray,
                "uintp",
                "intp",
                "intp",
                "intp",
                gpuarray.GpuArray,
                "uintp",
                "intp",
                "intp",
            ]
            kernels.append(
                Kernel(
                    code=sio.getvalue(),
                    name=kname,
                    params=params,
                    flags=flags,
                    objvar=k_var,
                )
            )
        if self.reduce_mask == (0, 0, 1, 1):
            # this kernel uses one block for each row,
            # threads per block for each element per row.
            reducebuf = self._k_reduce_buf(
                "Z[i0 * sZ0 + i1 * sZ1]", node, nodename, sub={}
            )
            decl, kname, params, k_var = self._k_decl(node, nodename)
            init = self._k_init(node, nodename)
            reduce_fct = self._assign_reduce(
                node,
                nodename,
                "myresult",
                load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2 + i3 * sA3])",
                {},
                True,
            )
            reduce_init = self._assign_init(
                load_in + "(A[i0 * sA0 + i1 * sA1])", assign_dtype
            )
            sio = StringIO()
            print(
                """#include "cluda.h"

            %(decl)s
            {
                %(init)s

                for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x)
                {
                    for (int i1 = blockIdx.y; i1 < d1; i1 += gridDim.y)
                    {
                        %(acc_type)s myresult = %(reduce_init)s;
                    for (int i2 = threadIdx.y; i2 < d2; i2 += blockDim.y)
                    {
                        for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x)
                        {
                            %(reduce_fct)s;
                        }
                    }
                        %(reducebuf)s
                    }
                }
            }
            """
                % locals(),
                file=sio,
            )
            kernels.append(
                Kernel(
                    code=sio.getvalue(),
                    name=kname,
                    params=params,
                    flags=flags,
                    objvar=k_var,
                )
            )
        if self.reduce_mask == (0, 1, 0, 1):
            # this kernel uses one block for each row,
            # threads per block for each element per row.
            reducebuf = self._k_reduce_buf(
                "Z[i0 * sZ0 + i2 * sZ1]", node, nodename, sub={}
            )
            decl, kname, params, k_var = self._k_decl(node, nodename)
            init = self._k_init(node, nodename)
            reduce_fct = self._assign_reduce(
                node,
                nodename,
                "myresult",
                load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2 + i3 * sA3])",
                {},
                True,
            )
            reduce_init = self._assign_init(
                load_in + "(A[i0 * sA0 + i2 * sA2])", assign_dtype
            )
            sio = StringIO()
            print(
                """#include "cluda.h"

            %(decl)s
            {
                %(init)s

                for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x)
                {
                    for (int i2 = blockIdx.y; i2 < d2; i2 += gridDim.y)
                    {
                        %(acc_type)s myresult = %(reduce_init)s;
                    for (int i1 = threadIdx.y; i1 < d1; i1 += blockDim.y)
                    {
                        for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x)
                        {
                            %(reduce_fct)s;
                        }
                    }
                        %(reducebuf)s
                    }
                }
            }
            """
                % locals(),
                file=sio,
            )
            kernels.append(
                Kernel(
                    code=sio.getvalue(),
                    name=kname,
                    params=params,
                    flags=flags,
                    objvar=k_var,
                )
            )
        if self.reduce_mask == (1, 1, 1, 1):
            reducebuf = self._k_reduce_buf("Z[0]", node, nodename, sub={})
            decl, kname, params, k_var = self._k_decl(node, nodename)
            init = self._k_init(node, nodename)
            reduce_fct = self._assign_reduce(
                node,
                nodename,
                "myresult",
                load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2 + i3 * sA3])",
                {},
                True,
            )
            reduce_init = self._assign_init(load_in + "(A[0])", assign_dtype)
            sio = StringIO()
            print(
                """#include "cluda.h"

            %(decl)s
            {
                %(init)s
                myresult = %(reduce_init)s;
              for (int i0 = 0; i0 < d0; i0++)
                for (int i1 = threadIdx.z; i1 < d1; i1 += blockDim.z)
                {
                    for (int i2 = threadIdx.y; i2 < d2; i2 += blockDim.y)
                    {
                        for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x)
                        {
                            %(reduce_fct)s;
                        }
                    }
                }
                %(reducebuf)s
            }
            """
                % locals(),
                file=sio,
            )
            kernels.append(
                Kernel(
                    code=sio.getvalue(),
                    name=kname,
                    params=params,
                    flags=flags,
                    objvar=k_var,
                )
            )
        if self.reduce_mask == (1, 0, 1, 1) or self.reduce_mask == (1, 0, 1):
            reducebuf = self._k_reduce_buf("Z[blockIdx.x*sZ0]", node, nodename, sub={})
            reduce_fct = self._assign_reduce(
                node,
                nodename,
                "myresult",
                load_in + "(A[i0 * sA0 + blockIdx.x * sA1 + i2 * sA2 + i3 * sA3])",
                {},
                True,
            )
            reduce_init = self._assign_init(
                load_in + "(A[blockIdx.x * sA1])", assign_dtype
            )
            kname = "kernel_reduce_1011"
            k_var = "kernel_reduce_1011_" + nodename
            sio = StringIO()
            print(
                """#include "cluda.h"

            KERNEL void %(kname)s(
                    const ga_size d0, const ga_size d1, const ga_size d2, const ga_size d3,
                    const %(in_type)s *A, const ga_size offset_A,
                    const ga_ssize sA0, const ga_ssize sA1, const ga_ssize sA2, const ga_ssize sA3,
                    %(out_type)s * Z, const ga_size offset_Z,
                    const ga_ssize sZ0)
            {
                const int threadCount = blockDim.x * blockDim.y * blockDim.z;
                const int threadNum = threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
                extern __shared__ %(acc_type)s buf[];
                A = (const %(in_type)s *)(((char *)A)+offset_A);
                Z = (%(out_type)s *)(((char *)Z)+offset_Z);
                %(acc_type)s myresult = %(reduce_init)s;

                for (int i0 = threadIdx.z; i0 < d0; i0 += blockDim.z)
                {
                    for (int i2 = threadIdx.y; i2 < d2; i2 += blockDim.y)
                    {
                        for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x)
                        {
                            %(reduce_fct)s;
                        }
                    }
                }
                %(reducebuf)s
            }
            """
                % locals(),
                file=sio,
            )
            params = [
                "uintp",
                "uintp",
                "uintp",
                "uintp",
                gpuarray.GpuArray,
                "uintp",
                "intp",
                "intp",
                "intp",
                "intp",
                gpuarray.GpuArray,
                "uintp",
                "intp",
            ]
            kernels.append(
                Kernel(
                    code=sio.getvalue(),
                    name=kname,
                    params=params,
                    flags=flags,
                    objvar=k_var,
                )
            )
        return kernels


class GpuErfinv(Erfinv):
    """
    Inverse error function for GPU.

    """

    def c_headers(self, **kwargs):
        # Headers needed by the generated C code (CUDA device math functions).
        return ["math_functions.h", "cublas_v2.h"]

    def c_code(self, node, name, inp, out, sub):
        """Return C code computing ``erfinv(x)``, clamped to match SciPy.

        Raises
        ------
        NotImplementedError
            If the input type is complex.
        """
        (x,) = inp
        (z,) = out
        if node.inputs[0].type in complex_types:
            # Bug fix: previously this passed the `type` builtin instead of
            # the actual offending type object.
            raise NotImplementedError("type not supported", node.inputs[0].type)
        # NB: CUDA erfinv function (GPU op) returns NaN if x not in [-1;1],
        # while `scipy.special.erfinv` (CPU op) returns an infinite (-inf if x < -1, +inf if x > 1).
        # For consistency of CPU and GPU ops, we wrap the CUDA erfinv in the following conditions
        # to ensure that GPU op returns the same values as CPU op.
        return (
            "%(z)s = (%(x)s <= -1) ? erfinv(-1.0): ((%(x)s >= 1) ? erfinv(1.0): erfinv(%(x)s));"
            % locals()
        )


# Shared GpuErfinv instance; output dtype follows upgrade_to_float_no_complex.
gpu_erfinv = GpuErfinv(upgrade_to_float_no_complex, name="gpu_erfinv")


class GpuErfcinv(Erfcinv):
    """
    Inverse complementary error function for GPU.

    """

    def c_headers(self, **kwargs):
        # Headers needed by the generated C code (CUDA device math functions).
        return ["math_functions.h", "cublas_v2.h"]

    def c_code(self, node, name, inp, out, sub):
        """Return C code computing ``erfcinv(x)``, clamped to match SciPy.

        Raises
        ------
        NotImplementedError
            If the input type is complex.
        """
        (x,) = inp
        (z,) = out
        if node.inputs[0].type in complex_types:
            # Bug fix: previously this passed the `type` builtin instead of
            # the actual offending type object.
            raise NotImplementedError("type not supported", node.inputs[0].type)
        # NB: CUDA erfcinv function (GPU op) returns NaN if x not in [0;2],
        # while `scipy.special.erfcinv` (CPU op) returns an infinite (+inf if x < 0, -inf if x > 2).
        # For consistency of CPU and GPU ops, we wrap the CUDA erfcinv in the following conditions
        # to ensure that GPU op returns the same values as CPU op.
        return (
            "%(z)s = (%(x)s <= 0) ? erfcinv(0.0): ((%(x)s >= 2) ? erfcinv(2.0): erfcinv(%(x)s));"
            % locals()
        )


# Shared GpuErfcinv instance; output dtype follows upgrade_to_float_no_complex.
gpu_erfcinv = GpuErfcinv(upgrade_to_float_no_complex, name="gpu_erfcinv")


# Caching GpuCAReduceCuda
def gpu_ca_reduce_cuda(
    scalar_op,
    axis=None,
    reduce_mask=None,
    dtype=None,
    acc_dtype=None,
    pre_scalar_op=None,
):
    """Return a memoized ``GpuCAReduceCuda`` for the given configuration.

    Instances are cached on the full argument tuple so that identical
    reductions share a single Op object.
    """
    cache = gpu_ca_reduce_cuda.cache
    key = (scalar_op, axis, reduce_mask, dtype, acc_dtype, pre_scalar_op)
    try:
        return cache[key]
    except KeyError:
        op = GpuCAReduceCuda(
            scalar_op, axis, reduce_mask, dtype, acc_dtype, pre_scalar_op
        )
        cache[key] = op
        return op


gpu_ca_reduce_cuda.cache = {}


class GpuCAReduceCPY(GpuKernelBase, HideC, CAReduceDtype):
    """
    CAReduce that reuse the python code from gpuarray.

    The reduction kernel is built by ``pygpu.reduction.ReductionKernel``
    (see ``generate_kernel``); only scalar ops that define an identity
    element are accepted, and kernel generation currently supports only
    ``Add`` and ``Mul``.
    """

    def __init__(self, scalar_op, axis=None, dtype=None, acc_dtype=None):
        # The identity element seeds the reduction accumulator (see
        # generate_kernel), so it is mandatory here.
        if scalar_op.identity is None:
            raise ValueError("No identity on scalar op")
        super().__init__(scalar_op, axis=axis, dtype=dtype, acc_dtype=acc_dtype)

    def __str__(self):
        """Render as ``GpuReduce{op}{axes}`` for graph printing."""
        ax = ""
        if self.axis is not None:
            ax = f"{{{', '.join(str(x) for x in self.axis)}}}"
        return f"GpuReduce{{{self.scalar_op}}}{ax}"

    def make_node(self, input):
        """Build the Apply node, moving `input` to the GPU.

        Also stores ``redux`` on the op: a per-dimension list of bools
        marking which axes are reduced.
        """
        ctx_name = infer_context_name(input)
        res = super().make_node(input)
        input = as_gpuarray_variable(input, ctx_name)
        otype = GpuArrayType(
            dtype=res.outputs[0].dtype,
            broadcastable=res.outputs[0].broadcastable,
            context_name=ctx_name,
        )

        if res.op.axis is not None:
            redux = []
            for i in range(len(input.type.broadcastable)):
                redux.append(i in res.op.axis)
                # since redux is just another way to describe what is in axis
                # it doesn't need to be compared in __eq__ or __hash__
            res.op.redux = redux

        return Apply(res.op, [input], [otype()])

    def get_params(self, node):
        # The op's params is the GPU context of the output.
        return node.outputs[0].type.context

    def prepare_node(self, node, storage_map, compute_map, impl):
        # cache the kernel object
        self.get_kernel_cache(node)

    def get_kernel_cache(self, node):
        """Return (building on first use) the ReductionKernel cached on `node`.

        Returns None when no dimension is reduced; that case is handled
        separately in ``c_code`` and ``perform``.
        """
        attr = "@cache_reduction_k"
        if self.axis is None:
            redux = [True] * node.inputs[0].ndim
        else:
            redux = self.redux
        if not hasattr(node, attr):
            acc_dtype = getattr(self, "acc_dtype", None)
            if acc_dtype is None:
                acc_dtype = node.outputs[0].type.dtype
            if any(redux):
                setattr(node, attr, self.generate_kernel(node, acc_dtype, redux))

        if any(redux):
            return getattr(node, attr)

    def gpu_kernels(self, node, name):
        """Return the list of Kernel objects to compile for this node.

        When nothing is reduced, a dummy one-argument kernel is emitted
        because some OpenCL compilers reject empty kernels.
        """
        if not any(getattr(self, "redux", [node.inputs[0].ndim != 0])):
            # Some OpenCL compilers do not accept no-arguments empty kernels
            src = '#include "cluda.h"\nKERNEL void reduk(GLOBAL_MEM float *a) { a[0] = 0; }'
            params = ["float32"]
        else:
            k = self.get_kernel_cache(node)
            _, src, _, _ = k._get_basic_kernel(k.init_local_size, node.inputs[0].ndim)
            nd = node.inputs[0].ndim
            # Parameter layout mirrors the arg packing in c_code:
            # n, output data, output offset, nd input dims,
            # input data, input offset, nd input strides.
            params = ["uint32", gpuarray.GpuArray, "uint32"]
            params.extend("uint32" for _ in range(nd))
            params.append(gpuarray.GpuArray)
            params.append("uint32")
            params.extend("int32" for _ in range(nd))
        acc_dtype = getattr(self, "acc_dtype", None)
        if acc_dtype is None:
            acc_dtype = node.outputs[0].type.dtype
        return [
            Kernel(
                code=src,
                name="reduk",
                params=params,
                flags=Kernel.get_flags(
                    node.inputs[0].type.dtype, acc_dtype, node.outputs[0].type.dtype
                ),
                objvar="k_reduk_" + name,
            )
        ]

    def c_code(self, node, name, inp, out, sub):
        """Generate the C code that allocates the output buffer, packs the
        kernel arguments, and launches the reduction kernel emitted by
        ``gpu_kernels``."""
        if not any(getattr(self, "redux", [node.inputs[0].ndim != 0])):
            # We special case the no-reduction case since the gpu
            # kernel has trouble handling it.
            return """
        Py_XDECREF(%(out)s);
        %(out)s = pygpu_copy(%(inp)s, GA_ANY_ORDER);
        if (!%(out)s) {
            %(fail)s
        }

        """ % dict(
                out=out[0], inp=inp[0], fail=sub["fail"]
            )
        k = self.get_kernel_cache(node)
        _, src, _, ls = k._get_basic_kernel(k.init_local_size, node.inputs[0].ndim)
        if self.axis is None:
            redux = [True] * node.inputs[0].ndim
        else:
            redux = self.redux
        acc_dtype = getattr(self, "acc_dtype", None)
        if acc_dtype is None:
            acc_dtype = node.outputs[0].type.dtype
        input = inp[0]
        output = out[0]
        nd_out = node.outputs[0].ndim
        code = """
        size_t gs = 1;
        size_t ls;
        unsigned int n = 1;
        unsigned int proxy_dim[%(nd_in)s];
        unsigned int proxy_off;
        int proxy_str[%(nd_in)s];
        void *args[%(n_args)s];
        PyGpuArrayObject *tmp;
        int err;
""" % dict(
            n_args=4 + (node.inputs[0].ndim * 2), nd_in=node.inputs[0].ndim
        )

        # Allocate (or reuse) the output buffer, keeping the non-reduced
        # dimensions of the input.
        if nd_out != 0:
            code += """
        size_t out_dims[%(nd_out)s];
        int need_out = %(output)s == NULL || %(output)s->ga.nd != %(nd_out)s;
""" % dict(
                nd_out=nd_out, output=output
            )
            j = 0
            for i in range(node.inputs[0].ndim):
                # NOTE(review): uses `self.redux` rather than the local
                # `redux`; equivalent here since nd_out != 0 implies
                # self.axis is not None, in which case both are the same.
                if not self.redux[i]:
                    code += """
         out_dims[%(j)s] = %(input)s->ga.dimensions[%(i)s];
         if (!need_out)
             need_out |= %(output)s->ga.dimensions[%(j)s] != out_dims[%(j)s];
""" % dict(
                        j=j, i=i, input=input, output=output
                    )
                    j += 1
            code += """
         if (need_out) {
             %(output)s = pygpu_empty(%(nd_out)s, out_dims, %(out_type)s, GA_C_ORDER, %(ctx)s, Py_None);
             if (!%(output)s) {
                 %(fail)s
             }
         }
        """ % dict(
                output=output,
                nd_out=nd_out,
                fail=sub["fail"],
                ctx=sub["params"],
                out_type=dtype_to_typecode(node.outputs[0].type.dtype),
            )
        else:
            code += """
        if (%(output)s == NULL || %(output)s->ga.nd != 0) {
            Py_XDECREF(%(output)s);
            %(output)s = pygpu_empty(0, NULL, %(out_type)s, GA_C_ORDER,
                                     %(ctx)s, Py_None);
            if (!%(output)s) {
                %(fail)s
            }
        }
        """ % dict(
                output=output,
                fail=sub["fail"],
                ctx=sub["params"],
                out_type=dtype_to_typecode(node.outputs[0].type.dtype),
            )

        # When accumulating in a different dtype, reduce into a temporary
        # and cast into the output afterwards (see cast_out at the end).
        if acc_dtype != node.outputs[0].type.dtype:
            code += """
        tmp = pygpu_empty(%(output)s->ga.nd, %(output)s->ga.dimensions,
                          %(acc_type)s, GA_C_ORDER, %(ctx)s, Py_None);
        if (!tmp) %(fail)s
        """ % dict(
                output=output,
                fail=sub["fail"],
                ctx=sub["params"],
                acc_type=dtype_to_typecode(acc_dtype),
            )
        else:
            code += f"""
        tmp = {output};
        Py_INCREF(tmp);
        """

        # We need the proxies since we are passing a pointer to the
        # data into the call and therefore we need a real copy of the
        # data in the proper type.
        # NOTE(review): this template has no %-placeholders, so the
        # dict below is inert.
        code += """
        args[0] = &n;
        args[1] = tmp->ga.data;
        args[2] = &tmp->ga.offset;
        """ % dict(
            output=output
        )

        # args[3:3+nd]: input dimensions. While packing them, accumulate
        # the total element count in n and the grid size gs over the
        # non-reduced dimensions.
        p = 3
        for i in range(node.inputs[0].ndim):
            code += """
        proxy_dim[%(i)s] = %(input)s->ga.dimensions[%(i)s];
        args[%(p)s] = &proxy_dim[%(i)s];
        n *= %(input)s->ga.dimensions[%(i)s];
        """ % dict(
                i=i, p=p, input=input
            )
            p += 1
            if not redux[i]:
                code += "gs *= %(input)s->ga.dimensions[%(i)s];" % dict(
                    input=input, i=i
                )

        # Next two args: input data pointer and its offset.
        code += """
        args[%(p)s] = %(input)s->ga.data;
        proxy_off = %(input)s->ga.offset;
        args[%(p)s+1] = &proxy_off;
        """ % dict(
            p=p, input=input
        )
        p += 2

        # Remaining args: one stride per input dimension.
        for i in range(node.inputs[0].ndim):
            code += """
        proxy_str[%(i)s] = %(input)s->ga.strides[%(i)s];
        args[%(p)s] = &proxy_str[%(i)s];
        """ % dict(
                p=p, i=i, input=input
            )
            p += 1

        # Launch the kernel, then either cast the temporary into the
        # output (cast_out) or hand the temporary over as the output.
        code += """
        if (gs == 0) gs = 1;
        n /= gs;
        ls = %(ls)s;
        err = GpuKernel_call(&%(k_var)s, 1, &gs, &ls, 0, args);
        if (err != GA_NO_ERROR) {
            PyErr_Format(PyExc_RuntimeError,
                         "gpuarray error: GpuCAReduceCPY: %%s.",
                         GpuKernel_error(&%(k_var)s, err));
            %(fail)s
        }

        if (%(cast_out)d) {
            err = GpuArray_move(&%(output)s->ga, &tmp->ga);
            Py_XDECREF(tmp);
            if (err != GA_NO_ERROR) {
                PyErr_Format(PyExc_RuntimeError,
                             "gpuarray error: GpuCAReduceCPY [cast]: %%s.",
                             GpuArray_error(&tmp->ga, err));
                %(fail)s
            }
        } else {
            Py_XDECREF(%(output)s);
            %(output)s = tmp;
        }

        """ % dict(
            k_var="k_reduk_" + name,
            ls=ls,
            fail=sub["fail"],
            output=output,
            input=input,
            cast_out=bool(acc_dtype != node.outputs[0].type.dtype),
        )

        return code

    def c_code_cache_version_apply(self, node):
        # Bump the first number whenever the generated C code changes.
        return (4, self.kernel_version(node))

    def generate_kernel(self, node, odtype, redux):
        """Build the pygpu ReductionKernel for this node.

        Only ``Add`` and ``Mul`` scalar ops are supported; anything else
        raises NotImplementedError.
        """
        if isinstance(self.scalar_op, aes.Add):
            reduce_expr = "a + b"
        elif isinstance(self.scalar_op, aes.Mul):
            reduce_expr = "a * b"
        else:
            raise NotImplementedError()
        return ReductionKernel(
            node.inputs[0].type.context,
            odtype,
            self.scalar_op.identity,
            reduce_expr,
            redux,
            arguments=[make_argument(node.inputs[0], "a")],
            init_nd=node.inputs[0].ndim,
        )

    def perform(self, node, inp, out, ctx):
        """Python implementation: run the cached pygpu reduction kernel,
        or just copy the input when no axis is reduced."""
        (input,) = inp
        (output,) = out

        if self.axis is None:
            redux = [True] * input.ndim
        else:
            redux = self.redux

        if any(redux):
            output[0] = self.get_kernel_cache(node)(input).astype(
                copy=False, dtype=node.outputs[0].type.dtype
            )
        else:
            output[0] = pygpu.gpuarray.array(
                input, copy=True, dtype=node.outputs[0].type.dtype, context=ctx
            )


# Backward-compatibility alias: to allow reloading old pickled files that
# refer to the name `GpuCAReduce`.
GpuCAReduce = GpuCAReduceCPY
