"""
Contains an Op for convolving input images with a set of filters. This was
developed especially for Convolutional Neural Networks.

For related ops, including downsampling and subsampling, see
tensor.signal and tensor.signal.pool.

See especially conv2d().
"""

from __future__ import absolute_import, print_function, division

import logging

import numpy as np
from six.moves import xrange
import warnings

import theano
from theano import OpenMPOp
from theano.tensor import (as_tensor_variable, blas, get_scalar_constant_value,
                           patternbroadcast, NotScalarConstantError)
from theano.gof import Apply
from theano.tensor.nnet.abstract_conv import (get_conv_output_shape,
                                              get_conv_shape_1axis)

try:
    # TODO: move these back out to global scope when they no longer
    # cause an atexit error
    from scipy.signal.signaltools import _valfrommode, _bvalfromboundary
    from scipy.signal.sigtools import _convolve2d
    imported_scipy_signal = True
except ImportError:
    imported_scipy_signal = False

__docformat__ = "restructuredtext en"
_logger = logging.getLogger("theano.tensor.nnet.conv")


def conv2d(input, filters, image_shape=None, filter_shape=None,
           border_mode='valid', subsample=(1, 1), **kargs):
    """
    Deprecated, old conv2d interface.
    This function will build the symbolic graph for convolving a stack of
    input images with a set of filters. The implementation is modelled after
    Convolutional Neural Networks (CNN). It is simply a wrapper to the ConvOp
    but provides a much cleaner interface.

    Parameters
    ----------
    input : symbolic 4D tensor
        Mini-batch of feature map stacks, of shape
        (batch size, stack size, nb row, nb col)
        see the optional parameter image_shape
    filters: symbolic 4D tensor
        Set of filters used in CNN layer of shape
        (nb filters, stack size, nb row, nb col)
        see the optional parameter filter_shape
    border_mode : {'valid', 'full'}
       'valid' only apply filter to complete patches of the image. Generates
       output of shape: image_shape - filter_shape + 1.
       'full' zero-pads image to multiple of filter shape to generate output
       of shape: image_shape + filter_shape - 1.
    subsample: tuple of len 2
        Factor by which to subsample the output. Also called strides elsewhere.
    image_shape: None, tuple/list of len 4 of int, None or Constant variable
        The shape of the input parameter.
        Optional, used for optimization like loop unrolling
        You can put None for any element of the list to tell that this element
        is not constant.
    filter_shape : None, tuple/list of len 4 of int, None or Constant variable
        Optional, used for optimization like loop unrolling
        You can put None for any element of the list
        to tell that this element is not constant.
    kwargs
        Kwargs are passed onto ConvOp. Can be used to set the following:
        unroll_batch, unroll_kern, unroll_patch, openmp (see ConvOp doc).

        openmp: By default have the same value as
                config.openmp. For small image, filter,
                batch size, nkern and stack size, it can be
                faster to disable manually openmp. A fast and
                incomplete test show that with image size
                6x6, filter size 4x4, batch size==1,
                n kern==1 and stack size==1, it is faster
                to disable it in valid mode. But if we
                grow the batch size to 10, it is faster
                with openmp on a core 2 duo.

    Returns
    -------
    symbolic 4D tensor
        Set of feature maps generated by convolutional layer. Tensor is
        of shape (batch size, nb filters, output row, output col).

    """

    # stacklevel=2 so the deprecation warning points at the caller,
    # not at this wrapper.
    warnings.warn("theano.tensor.nnet.conv.conv2d is deprecated."
                  " Use theano.tensor.nnet.conv2d instead.", stacklevel=2)

    def _as_constant_shape(shape, name):
        # Accept Constant values for the shape entries: every non-None
        # element must reduce to a compile-time integer constant.
        # Returns a list of ints and Nones; raises NotScalarConstantError
        # for symbolic, non-constant entries.
        shape = list(shape)
        for i in xrange(len(shape)):
            if shape[i] is None:
                continue
            try:
                shape[i] = get_scalar_constant_value(
                    as_tensor_variable(shape[i]))
            except NotScalarConstantError:
                raise NotScalarConstantError(
                    "The convolution need that the shape"
                    " information are constant values. We got"
                    " %s for the %s parameter" % (shape[i], name))
            assert shape[i].dtype in theano.tensor.discrete_dtypes
            shape[i] = int(shape[i])
        return shape

    if image_shape is not None:
        image_shape = _as_constant_shape(image_shape, "image_shape")
    if filter_shape is not None:
        filter_shape = _as_constant_shape(filter_shape, "filter_shape")

    if image_shape and filter_shape:
        try:
            # The input stack size must match the filter stack size.
            if image_shape[1] is not None and filter_shape[1] is not None:
                assert image_shape[1] == filter_shape[1]
        except Exception:
            print('image ', image_shape, ' filters ', filter_shape)
            raise

    # Split the 4D shapes into the pieces ConvOp wants:
    # (nkern, kshp) from the filters, (bsize, imshp) from the images.
    if filter_shape is not None:
        nkern = filter_shape[0]
        kshp = filter_shape[2:]
    else:
        nkern, kshp = None, None

    if image_shape is not None:
        bsize = image_shape[0]
        imshp = image_shape[1:]
    else:
        bsize, imshp = None, None

    op = ConvOp(output_mode=border_mode, dx=subsample[0], dy=subsample[1],
                imshp=imshp, kshp=kshp, nkern=nkern, bsize=bsize, **kargs)

    return op(input, filters)


class ConvOp(OpenMPOp):
    """
    This Op serves a dual purpose: it can implement a vanilla 2D convolution
    (as taught in any signal processing class) or implement the
    convolutional layers found in Convolutional Neural Networks.

    In this setting, a set of 3D images is convolved with a set of 3D kernels,
    with the particularity that their leading dimensions are of equal length.
    Vanilla 2D convolution is treated as a special case of this.

    The input parameter represents a mini-batch of multiple images. Its shape is:
        batch size x num. input feature maps x image height x image width

    The kernel parameter represents a set of 3D kernels. Its shape is:
        number of filters x num. input images x filter height x filter width

    The output of ConvOp is a 4D tensor, generated as follows:
        output[b,k,:,:] = \sum_i input[b,i,:,:] * filter[k,i,:,:] \forall b,k
    where b is the mini-batch index, k the filter index and * is the
    convolution operator.

    The constructor initializes a ConvOp with given output_mode (full/valid).
    All other parameters are optional and are only used to generate more
    optimized c code, or to enable graph optimizers to optimally replace the
    ConvOp.

    NOTES ON OPTIMIZATION:
    There are two types of optimization. The first is the selection of the
    fastest algo when bsize and nkern are provided with imshp and kshp.
    By default we try to select the fastest version. You can specify it
    with the unroll_batch, unroll_kern, and unroll_patch parameter.

    The second type of optimization is hardcoding some dimensions into the
    code when all shape are know.
    This make a significant difference for the 'full' output_mode.

    Sometimes, the fastest implementation on x86-64 uses
    {unroll_batch=4, unroll_kern=4, unroll_patch=False}
    with all other shape parameters being provided.

    For optimizing other architectures, see:
    Kazushige Goto and Robert A. Van De Geijn, Anatomy of High-Performance
    Matrix Multiplication, (mr x nr). ACM Transactions on Mathematical
    Software, May 2008.
    Figure 12: (mr x nr). For x86 use 2x4, itanium 8x8, etc.

    Parameters
    ----------
    output_mode : {'valid', 'full'}
        'valid' gives an output smaller then the image.
        'full' gives an output bigger then the image.
         See 'border_mode' in conv2d's doc.

    Optional parameters: (will generate more optimal c code)

    imshp : tuple of len 2 or 3: 2 for 2d image, 3 for a stack of 2d images.
        Stacksize, nb image row, nb image col.
    kshp : tuple of len 2
        Nb kernel row, nb kernel col.
    nkern : int
        The number of kernel.
    bsize : int
        The size of the minibatch.
    dx : int
        Patch stride rows.
    dy : int
        Patch stride cols

    Params which select the version of code used:

    unroll_patch : bool
        Use a version of c_code that unroll the patch loop that don't
        request all shape information to work, but if all shape information
        are present, will use it to hardcode the value in the code for
        faster code.
    unroll_batch : int
        Use a version of c_code that unroll the batch (by unroll_batch)
        and the nkern (by unroll_kern) loop. The size must by a multiple
        of bsize or nkern respectively.
    unroll_kern : int
        Use a version of c_code that unroll the batch
        (by unroll_batch) and the nkern(by unroll_kern) loop. The size
        must by a multiple of bsize or nkern respectively.
    verbose : int
        Passed to GpuConv.
    version: int or str
        Passed to GpuConv, if version='no_fft', fft
        optimization will be desactivated at the op level.
    direction_hint: {'forward', 'bprop weights', 'bprop inputs'}
        Passed to GpuConv, used by graph optimizers to aid algorithm choice.

    The 3 following parameters are used internally when we generate
    the gradient when dx!=1 or dy!=1.

    imshp_logical
        Default None. None value is equivalent to imshp value.
        When imshp_logical != imshp, it tell we need to insert 0 in
        the image before we do the convolution. For example, when dx==dy==2
        and the image is [[1, 2], [3, 4]], we should make as if the image
        was [[1, 0, 2, 0], [0, 0, 0, 0], [3, 0, 4, 0], [0, 0, 0, 0]].
        Our python code insert the zero, but the c code optimize it.
        imshp_logical != imshp when taking the grad again the weights or
        the image when the output_mode is full and `dx != 1` or `dy != 1`.
    kshp_logical
        Idem but for kshp and used for the grad again the
        weights when the output_mode is valid and `dx != 1` or `dy != 1`.
    kshp_logical_top_aligned
        Used in the same case. Default to True.
        Set to False in the grad again the weight when the
        output_mode is full.

    """

    __attrnames = ['imshp', 'kshp', 'nkern', 'bsize', 'dx', 'dy', 'out_mode',
                   'unroll_batch', 'unroll_kern', 'unroll_patch',
                   'imshp_logical', 'kshp_logical', 'kshp_logical_top_aligned']
    """These attributes uniquely identify the behaviour of this op for
    given inputs. Do not set openmp here.
    """

# The values of speed_unroll_batch_kern, speed_unroll_patch_noshape and
# speed_unroll_patch_shape below were measured on "maggie36" while only one
# session was logged in and nothing else was running.
# The machine is an Intel(R) Xeon(R) CPU E5430 @ 2.66GHz. The timings were
# computed with theano/tensor/nnet/tests/speed_test_conv.py
# and took 5 minutes to run.
# TODO: we should compute this table for each computer/os as this can change.
#      I saw on one computer that the speed with the shape can be slower than
#      without! Using the real shape and the same dtype could also help.

# Each entry is (unroll_batch, unroll_kern, valid time, full time).
# __init__ scans this table to pick the fastest (batch, kern) unrolling that
# divides the actual bsize/nkern.
    speed_unroll_batch_kern = [(1, 1, 2.4661250114440918, 6.5472931861877441),
                               (1, 2, 1.5869178771972656, 5.1499760150909424),
                               (1, 3, 1.4270510673522949, 3.6593470573425293),
                               (1, 4, 1.3373479843139648, 3.3451821804046631),
                               (1, 5, 1.2818830013275146, 3.1444568634033203),
                               (1, 6, 1.2521560192108154, 3.0256359577178955),
                               (1, 10, 1.2134110927581787, 2.9174180030822754),
                               (2, 1, 1.657214879989624, 4.5261678695678711),
                               (2, 2, 1.2123160362243652, 2.9747390747070312),
                               (2, 3, 1.0758891105651855, 2.5690360069274902),
                               (2, 4, 1.0683329105377197, 2.4233770370483398),
                               (2, 5, 1.0955719947814941, 2.3999948501586914),
                               (2, 6, 1.5935721397399902, 2.6878271102905273),
                               (2, 10, 1.8511250019073486, 3.2417428493499756),
                               (3, 1, 1.5948119163513184, 3.631148099899292),
                               (3, 2, 1.0761330127716064, 2.6011371612548828),
                               (3, 3, 1.0551531314849854, 2.4200370311737061),
                               (3, 4, 1.3930759429931641, 2.5211219787597656),
                               (3, 5, 1.4330689907073975, 2.5704989433288574),
                               (3, 6, 1.362138032913208, 2.5964410305023193),
                               (3, 10, 1.6582000255584717, 2.9907989501953125),
                               (4, 1, 1.4793620109558105, 3.3473429679870605),
                               (4, 2, 1.0671560764312744, 2.4171769618988037),
                               (4, 3, 1.2569692134857178, 2.2807950973510742),
                               (4, 4, 1.3456289768218994, 2.6219108104705811),
                               (4, 5, 1.4055080413818359, 2.4606490135192871),
                               (4, 6, 1.372107982635498, 2.551663875579834),
                               (4, 10, 1.599470853805542, 2.9172940254211426),
                               (5, 1, 1.4115700721740723, 3.2077109813690186),
                               (5, 2, 1.0635769367218018, 2.2648060321807861),
                               (5, 3, 1.3842809200286865, 2.6135518550872803),
                               (5, 4, 1.3470511436462402, 2.3852400779724121),
                               (5, 5, 1.3539440631866455, 2.5245928764343262),
                               (5, 6, 1.4037849903106689, 2.5985310077667236),
                               (5, 10, 1.6120610237121582, 2.8127608299255371),
                               (6, 1, 1.3623628616333008, 3.021122932434082),
                               (6, 2, 1.1697649955749512, 2.6285450458526611),
                               (6, 3, 1.2980999946594238, 2.4746189117431641),
                               (6, 4, 1.3739941120147705, 2.5579929351806641),
                               (6, 5, 1.3967819213867188, 2.5522029399871826),
                               (6, 6, 1.4279270172119141, 2.6127138137817383),
                               (6, 10, 1.605496883392334, 2.864037036895752),
                               (10, 1, 1.6401121616363525, 2.970099925994873),
                               (10, 2, 1.46710205078125, 2.7231831550598145),
                               (10, 3, 1.4193780422210693, 2.6087639331817627),
                               (10, 4, 1.4657118320465088, 2.6246678829193115),
                               (10, 5, 1.5052611827850342, 2.6542458534240723),
                               (10, 6, 1.5214400291442871, 2.7243161201477051),
                               (10, 10, 1.6116268634796143, 2.956165075302124)]

    # [valid time, full time] for the unroll_patch code path, without shapes
    speed_unroll_patch_noshape = [2.0109100341796875, 5.8175678253173828]
    # [valid time, full time] for the unroll_patch code path, with shapes
    speed_unroll_patch_shape = [1.2967290878295898, 5.5283889770507812]

    @staticmethod
    def has_all_shape(imshp, kshp, nkern=1, bsize=1):
        return (nkern is not None and bsize is not None and
                all(shp is not None for shp in imshp) and
                all(shp is not None for shp in kshp))

    @staticmethod
    def getOutputShape(inshp, kshp, stride=(1, 1), mode='valid'):
        """
        Computes the output dimensions of convolving an image of shape "inshp"
        with kernels of shape "kshp". Accepts symbolic or integer shapes.
        Propagates `None`s (for unknown shapes).

        Deprecated: use `get_conv_output_shape` instead.

        Parameters
        ----------
        inshp
            (rows,cols) of input image.
        kshp
            (rows,cols) of filters.
        mode: {'valid', 'full'}
            See 'border_mode' in conv2d's doc.

        Returns
        -------
        object
            (rows,cols) of output image.

        """
        # The formula would be ceil((i + s * k - s * 1) / float(d)),
        # with s=1 for mode=='full' and s=-1 for mode=='valid'.
        # To support symbolic shapes, we express this with integer arithmetics.
        # Note: the previous message read "...deprecated use`get_conv..."
        # because of a missing space in the implicit string concatenation.
        warnings.warn("The method `getOutputShape` is deprecated. Use"
                      " `get_conv_output_shape` instead.", stacklevel=2)
        return tuple(get_conv_shape_1axis(i, k, mode, d)
                     for i, k, d in zip(inshp, kshp, stride))

    def __init__(self, imshp=None, kshp=None, nkern=None, bsize=None,
                 dx=1, dy=1,
                 output_mode='valid',

                 unroll_batch=None,
                 unroll_kern=None,
                 unroll_patch=None,
                 imshp_logical=None,
                 kshp_logical=None,
                 kshp_logical_top_aligned=True,
                 verbose=0,
                 version=-1,
                 direction_hint='forward',
                 openmp=None):
        """
        Build a ConvOp. See the class docstring for the meaning of every
        parameter. Besides storing the attributes, this constructor
        normalizes the shape arguments, validates dx/dy and the unroll_*
        parameters, precomputes the output shapes (self.outshp and
        self.fulloutshp), and, when no unroll_* parameter was given,
        selects the fastest C code variant from the timing tables above.

        Raises
        ------
        ValueError
            If imshp, kshp, imshp_logical or kshp_logical have a wrong
            length.
        TypeError
            If dx or dy is not an integer.
        Exception
            If unrolling is requested without full shape information, if
            output_mode is not 'valid'/'full', or if the computed output
            shape is not strictly positive.

        """
        # Deactivate fft_optimization at the op level if specified
        if version == "no_fft":
            self.fft_opt = False
            version = -1
        else:
            self.fft_opt = True

        # Expand unknown image / kernel shapes into tuples of Nones
        if imshp is None:
            imshp = (None, None, None)
        else:
            imshp = tuple(imshp)
        if kshp is None:
            kshp = (None, None)
        else:
            kshp = tuple(kshp)

        # Check imshp and kshp dimensionality. A 2-element imshp means a
        # single 2d image: prepend a stack size of 1.
        if len(imshp) == 2:
            imshp = (1,) + imshp
        elif len(imshp) != 3:
            raise ValueError("len(imshp) must be 2 or 3, got %d" % len(imshp))
        if len(kshp) != 2:
            raise ValueError("len(kshp) must be 2, got %d" % len(kshp))

        # We must continue to consider None as 1 for backward compatibility.
        if dx is None:
            dx = 1
        if dy is None:
            dy = 1

        if int(dx) != dx:
            raise TypeError('ConvOp.__init__ param dx must be an int', dx)
        dx = int(dx)

        if int(dy) != dy:
            raise TypeError('ConvOp.__init__ param dy must be an int', dy)
        dy = int(dy)

        # The unrolled code paths hardcode shapes, so they require all of
        # imshp, kshp, nkern and bsize to be known.
        all_shape = self.has_all_shape(imshp, kshp, nkern, bsize)
        if (unroll_batch or unroll_kern) and not all_shape:
            raise Exception("In ConvOp, when using unroll_batch and"
                            " unroll_nkern, all shape are needed")

        # Init the openmp attribute
        super(ConvOp, self).__init__(openmp=openmp)
        if not all_shape or self.openmp:
            # Only this version is parallelized
            unroll_patch = True

        self.imshp = imshp
        self.kshp = kshp
        self.nkern = nkern
        self.bsize = bsize
        self.dx = dx
        self.dy = dy
        self.verbose = verbose
        self.version = version
        self.direction_hint = direction_hint

        # a triple
        if imshp_logical is None:
            self.imshp_logical = self.imshp
        else:
            imshp_logical = tuple(imshp_logical)
            if len(imshp_logical) != 3:
                raise ValueError("len(imshp_logical) must be 3, got %d" % len(imshp_logical))
            self.imshp_logical = imshp_logical

        # a pair
        if kshp_logical is None:
            self.kshp_logical = self.kshp
        else:
            kshp_logical = tuple(kshp_logical)
            if len(kshp_logical) != 2:
                raise ValueError("len(kshp_logical) must be 2, got %d" % len(kshp_logical))
            self.kshp_logical = kshp_logical

        # a bool
        self.kshp_logical_top_aligned = kshp_logical_top_aligned

        self.unroll_batch = unroll_batch
        self.unroll_kern = unroll_kern
        self.unroll_patch = unroll_patch

        # unroll_batch and unroll_kern are used together: default the
        # missing one to 1 when only one of them was provided.
        if self.unroll_batch and not self.unroll_kern:
            self.unroll_kern = 1
        if self.unroll_kern and not self.unroll_batch:
            self.unroll_batch = 1

        # downcast unroll_batch if not a divisor of batch size
        if self.unroll_batch is not None and self.unroll_batch > 0 and self.bsize % self.unroll_batch != 0:

            if self.bsize <= self.unroll_batch:
                self.unroll_batch = self.bsize
            else:
                # find the maximum value under unroll_batch that would work
                new = self.unroll_batch
                assert new >= 1
                while self.bsize % new != 0:
                    new -= 1

                warnstr = ("OPTIMISATION WARNING: in ConvOp.__init__() "
                           "unroll_batch(%i) must be 0 or a divisor of"
                           " bsize(%i). We revert it to %i. This"
                           " won't change the result, but may make it slower.")
                # Logger.warn is a deprecated alias (removed in Python 3.13);
                # use Logger.warning.
                _logger.warning(warnstr, self.unroll_batch, self.bsize, new)

                self.unroll_batch = new

        # downcast unroll_kern if not a divisor of nb of kernel
        if self.unroll_kern is not None and self.unroll_kern > 0 and self.nkern % self.unroll_kern != 0:

            if self.nkern <= self.unroll_kern:
                self.unroll_kern = self.nkern
            else:
                # find the maximum value under unroll_kern that would work
                new = self.unroll_kern
                assert new >= 1
                while self.nkern % new != 0:
                    new -= 1

                warnstr = ("OPTIMISATION WARNING: in ConvOp.__init__()"
                           " unroll_kern(%i) should be 0 or a divisor of"
                           " nkern(%i). We revert it to %i. This"
                           " won't change the result, but may make it slower.")
                _logger.warning(warnstr, self.unroll_kern, self.nkern, new)
                self.unroll_kern = new

        # Output shape with subsampling (dx, dy), and the "full" output
        # shape without subsampling (used internally by the gradient).
        self.outshp = get_conv_output_shape(
            (None,) + self.imshp_logical,
            (None, None,) + self.kshp_logical,
            output_mode,
            (dx, dy))[2:]
        self.fulloutshp = get_conv_output_shape(
            (None,) + self.imshp_logical,
            (None, None,) + self.kshp_logical,
            output_mode,
            (1, 1))[2:]

        self.out_mode = output_mode

        if self.out_mode not in ["valid", "full"]:
            raise Exception("Mode %s not implemented" % str(self.out_mode))

        if any((shp is not None) and (shp <= 0) for shp in self.outshp):
            raise Exception("Bad size for the output shape. Verify that [post-"
                            "supersampling] input shape (%s) and kern"
                            " shape(%s) are ok. (Hint: kerns must fit inside"
                            " image in valid mode)" %
                            (self.imshp_logical, self.kshp_logical))

        if (self.unroll_kern is None and
                self.unroll_batch is None and
                self.unroll_patch is None):
            # no version specified. Find the fastest we have.
            # Pre-bind the timing variables so the debug log below can
            # never reference an unbound local, whichever branch runs.
            time_unroll_patch = time_unroll_batch_kern = None
            if self.bsize is None and self.nkern is None:
                self.unroll_patch = True
            elif self.bsize is not None and self.nkern is not None:
                bsize = self.bsize
                nkern = self.nkern
                mode_idx = 0
                if self.out_mode != "valid":
                    mode_idx = 1
                if self.has_all_shape(self.imshp, self.kshp):
                    time_unroll_patch = self.speed_unroll_patch_shape[mode_idx]
                else:
                    time_unroll_patch = self.speed_unroll_patch_noshape[
                        mode_idx]
                # Among the (unroll_batch, unroll_kern) pairs that divide
                # bsize and nkern, keep the one with the best recorded time.
                time_unroll_batch_kern = 9999999
                for i in xrange(len(self.speed_unroll_batch_kern)):
                    if (bsize % self.speed_unroll_batch_kern[i][0] == 0 and
                            nkern % self.speed_unroll_batch_kern[i][1] == 0):
                        if self.speed_unroll_batch_kern[i][2 + mode_idx] < time_unroll_batch_kern:
                            time_unroll_batch_kern = self.speed_unroll_batch_kern[i][2 + mode_idx]
                            time_unroll_batch_kern_idx = i
                if time_unroll_patch < time_unroll_batch_kern:
                    self.unroll_patch = True
                else:
                    self.unroll_batch = self.speed_unroll_batch_kern[
                        time_unroll_batch_kern_idx][0]
                    self.unroll_kern = self.speed_unroll_batch_kern[
                        time_unroll_batch_kern_idx][1]
                    self.unroll_patch = False

            _logger.debug("AUTO FIND VERSION OF C_CODE OF CONV OP "
                          "%s %s %s %s %s %s %s",
                          self.unroll_batch, self.unroll_kern,
                          self.unroll_patch,
                          self.bsize, self.nkern, time_unroll_patch,
                          time_unroll_batch_kern)

        self._rehash()

    def __eq__(self, other):
        if type(self) != type(other):
            return False
        for a in self.__attrnames:
            if getattr(self, a) != getattr(other, a):
                return False
        return True

    def __setstate__(self, d):
        # Restore pickled state, then backfill `direction_hint` for old
        # pickles created before that attribute existed (d.get defaults it
        # to None), and recompute the cached hash value used by __hash__.
        super(ConvOp, self).__setstate__(d)
        self.direction_hint = d.get("direction_hint", None)
        self._rehash()

    def _rehash(self):
        hashval = hash(type(self))
        for a in self.__attrnames:
            hashval = hashval ^ hash(getattr(self, a))
        self.__hashval = hashval

    def __hash__(self):
        # Return the value precomputed by _rehash() (run in __init__ and
        # __setstate__); consistent with __eq__ over __attrnames.
        return self.__hashval

    def __str__(self):
        return "ConvOp{" + ",".join(str((a, getattr(self, a)))
                                    for a in self.__attrnames) + "}"

    def flops(self, inputs, outputs):
        """
        Useful with the hack in profiling to print the MFlops.

        """
        images, kerns = inputs
        out, = outputs
        assert images[1] == kerns[1]
        flops = 0
        if self.out_mode == "valid":
            # nb mul and add by output pixel
            flops = kerns[2] * kerns[3] * 2
            # nb flops by output image
            flops *= out[2] * out[3]
            # nb patch multiplied
            flops *= images[1] * kerns[0] * images[0]
        else:
            flops = (images[0] * kerns[0] * images[1] *
                     kerns[2] * kerns[3] *
                     images[2] * images[3] * 2)
        return flops

    def make_node(self, inputs, kerns):
        # TODO: find a way to make ConvOp work for N-D (after NIPS09)
        """
        Build the Apply node applying this op to images and kernels.

        Parameters
        ----------
        inputs
            4 dim: batches x stacksize x rows x cols.
        kerns
            4 dim: nkern x stackidx x rows x cols.

        """
        img = as_tensor_variable(inputs)
        kern = as_tensor_variable(kerns)
        # TODO: lift this restriction by upcasting either inputs or kerns
        if img.ndim != 4:
            raise TypeError('ConvOp (make_node) requires input be a 4D tensor;'
                            ' received "%s" (%i dims)' %
                            (inputs, img.ndim))
        if kern.ndim != 4:
            raise TypeError('make_node requires 4D tensor of kernels')
        if img.type.dtype != kern.type.dtype:
            raise NotImplementedError(
                "The image and the kernel must have the same type."
                "inputs(%s), kerns(%s)" % (img.dtype, kern.dtype))
        # Output dims 2 and 3 are broadcastable iff the known output shape
        # along that axis is exactly 1; dims 0 and 1 inherit the batch /
        # nkern broadcast pattern.
        out_bcast = [img.broadcastable[0],
                     kern.broadcastable[0],
                     self.outshp[0] == 1,
                     self.outshp[1] == 1]
        output = theano.tensor.tensor(dtype=img.type.dtype,
                                      broadcastable=out_bcast)

        return Apply(self, [img, kern], [output])

    def infer_shape(self, node, input_shapes):
        """Compute the symbolic output shape, preferring the shapes known
        at construction time over the symbolic ones from the graph."""
        im4, k4 = input_shapes  # 4D image shape, 4D filter shape
        # batch size / number of kernels: take the static value when known
        bsize = im4[0] if self.bsize is None else self.bsize
        nkern = k4[0] if self.nkern is None else self.nkern
        # per-axis: static (logical) shape when known, symbolic otherwise
        imshp = [sym if known is None else known
                 for known, sym in zip(self.imshp_logical, im4[1:])]
        kshp = [sym if known is None else known
                for known, sym in zip(self.kshp_logical, k4[2:])]
        # infer output shape from what we have
        res = get_conv_output_shape(
            (bsize,) + tuple(imshp),
            (nkern, None,) + tuple(kshp),
            self.out_mode,
            (self.dx, self.dy))
        return [res]

    def perform(self, node, inp, out):
        """
        By default if len(img2d.shape)==3, we TODO

        """
        img2d, filtersflipped = inp
        z, = out
        if not imported_scipy_signal:
            raise theano.gof.utils.MethodNotDefined(
                "c_headers", type(self), self.__class__.__name__,
                "Need the python package for scipy.signal to be installed "
                "for the python implementation. You can use the C"
                " implementation instead.")

        # TODO: move these back out to global scope when they no longer
        #       cause an atexit error
        imshp = self.imshp
        if any(x is None for x in imshp):
            imshp = tuple(img2d.shape[1:])
        if imshp != img2d.shape[1:]:
            raise ValueError("The image shape provided at build time "
                             "is different from the one passed at run time",
                             imshp, img2d.shape[1:])
        kshp = self.kshp
        if any(x is None for x in kshp):
            kshp = tuple(filtersflipped.shape[2:])
        if kshp != filtersflipped.shape[2:]:
            raise ValueError("The filter shape provided at build time "
                             "is different from the one passed at run time",
                             kshp, filtersflipped.shape[2:])
        bsize = self.bsize
        if bsize is None:
            bsize = img2d.shape[0]
        elif bsize != img2d.shape[0]:
            raise ValueError("The batch size provided at build time "
                             "is different from the one passed at run time",
                             bsize, img2d.shape[0])
        nkern = self.nkern
        if nkern is None:
            nkern = filtersflipped.shape[0]
        elif nkern != filtersflipped.shape[0]:
            raise ValueError("The number of filters provided at build time "
                             "is different from the one passed at run time",
                             nkern, filtersflipped.shape[0])

        imshp_logical = self.imshp_logical
        if imshp_logical[0] is None:
            imshp_logical = (imshp[0],) + imshp_logical[1:]
        if imshp_logical[1] is None:
            imshp_logical = (imshp_logical[0], imshp[1], imshp_logical[2])
        if imshp_logical[2] is None:
            imshp_logical = imshp_logical[:2] + (imshp[2],)
        assert all(x is not None for x in imshp_logical)

        kshp_logical = self.kshp_logical
        if kshp_logical[0] is None:
            kshp_logical = (kshp[0], kshp_logical[1])
        if kshp_logical[1] is None:
            kshp_logical = (kshp_logical[0], kshp[1])
        assert all(x is not None for x in kshp_logical)

        if all(shp is not None for shp in self.fulloutshp):
            fulloutshp = tuple(self.fulloutshp)
        else:
            fulloutshp = get_conv_output_shape(
                (None,) + imshp_logical,
                (None, None,) + kshp_logical,
                self.out_mode,
                (1, 1))[2:]

        if z[0] is None or z[0].shape != (bsize, nkern,) + fulloutshp:
            z[0] = np.zeros((bsize, nkern,) + fulloutshp,
                            dtype=img2d.dtype)
        zz = z[0]

        stacklen = imshp[0]

        img2d = img2d.reshape((bsize,) + imshp)
        filtersflipped = filtersflipped.reshape((nkern, stacklen) + kshp)

        if self.imshp != self.imshp_logical:
            # assuming that to get from imshp to imshp logical we insert zeros in missing spots
            rstride = int(np.ceil(imshp_logical[1] / float(imshp[1])))
            cstride = int(np.ceil(imshp_logical[2] / float(imshp[2])))
            buf = np.zeros((bsize,) + imshp_logical, dtype=img2d.dtype)
            buf[:, :, ::rstride, ::cstride] = img2d
            img2d = buf
            del buf, rstride, cstride

        if kshp != kshp_logical:
            rstride = int(np.ceil(kshp_logical[0] / float(kshp[0])))
            cstride = int(np.ceil(kshp_logical[1] / float(kshp[1])))
            buf = np.zeros((nkern, stacklen) +
                           self.kshp_logical, dtype=filtersflipped.dtype)
            if self.kshp_logical_top_aligned:
                roffset = coffset = 0
            else:
                roffset = (kshp_logical[0] - (kshp[0] *
                                              rstride) - 1 + rstride) % rstride
                coffset = (kshp_logical[1] - (kshp[1] *
                                              cstride) - 1 + cstride) % cstride
                assert roffset >= 0
                assert coffset >= 0
            buf[:, :, roffset::rstride, coffset::cstride] = filtersflipped
            filtersflipped = buf
            del buf, rstride, cstride

        val = _valfrommode(self.out_mode)
        bval = _bvalfromboundary('fill')

        with warnings.catch_warnings():
            warnings.simplefilter('ignore', np.ComplexWarning)
            for b in xrange(bsize):
                for n in xrange(nkern):
                    zz[b, n, ...].fill(0)
                    for im0 in xrange(stacklen):
                        # some cast generates a warning here
                        zz[b, n, ...] += _convolve2d(img2d[b, im0, ...],
                                                     filtersflipped[n, im0, ...],
                                                     1, val, bval, 0)

        if False:
            if False and self.out_mode == "full":
                img2d2 = np.zeros((bsize, stacklen,
                                   imshp[1] + 2 * kshp[0] - 2,
                                   imshp[2] + 2 * kshp[1] - 2))
                img2d2[:, :, kshp[0] - 1:kshp[0] - 1 + imshp[1],
                       kshp[1] - 1:kshp[1] - 1 + imshp[2]] = img2d
                img2d = img2d2
            # N_image_shape = image_data.shape

            for b in xrange(bsize):
                for n in xrange(nkern):
                    zz[b, n, ...].fill(0)
                    for im0 in xrange(stacklen):
                        for row in xrange(0, zz.shape[2], self.dx):
                            for col in xrange(0, zz.shape[3], self.dy):
                                zz[b, n, row, col] += (
                                    img2d[b, im0, row:row + kshp[0],
                                          col:col + kshp[1]] *
                                    filtersflipped[n, im0, ::-1, ::-1]).sum()

        # We copy it to remove the Stride mismatch warning from DEBUG_MODE.
        # The copy make that we return an object with the same stride as the c version.
        # The copy don't affect the performance during our experience as in that case we
        # execute the c version which is much faster.
        if self.dx > 1 or self.dy > 1:
            zz = zz[:, :, 0::self.dx, 0::self.dy].copy()
        z[0] = zz

    def R_op(self, inputs, eval_points):
        """R-operator (forward-mode derivative) of the convolution.

        The convolution output is bilinear in (image, kernel), so the
        R-op is conv(ev_img, kern) + conv(img, ev_kern), where each term
        is included only when the corresponding evaluation point is
        provided.
        """
        result = None
        if eval_points[0] is not None:
            # Contribution from perturbing the image input.
            result = self.make_node(eval_points[0], inputs[1]).outputs[0]
        if eval_points[1] is not None:
            # Contribution from perturbing the kernel input.
            kern_term = self.make_node(inputs[0], eval_points[1]).outputs[0]
            result = kern_term if result is None else result + kern_term
        return [result]

    def grad(self, inp, grads):
        """Return the gradient of the convolution w.r.t. images and kernels.

        Both gradients are themselves expressed as new ConvOp instances
        applied to dimshuffled/flipped versions of the forward inputs and
        of ``gz`` (the gradient w.r.t. this Op's output).

        Parameters
        ----------
        inp : (inputs, kerns)
            The two symbolic 4D inputs of the forward Op.
        grads : (gz,)
            Gradient of the cost w.r.t. this Op's output.

        Returns
        -------
        [din, dw]
            Gradients w.r.t. ``inputs`` and ``kerns`` respectively, with
            broadcasting patterns matching the corresponding inputs.

        Raises
        ------
        NotImplementedError
            For logical-shape (upsampled) convolutions, for strided 'valid'
            convolutions, and for strides greater than 2.
        """
        inputs, kerns = inp
        gz, = grads

        if self.imshp != self.imshp_logical or self.kshp != self.kshp_logical:
            raise NotImplementedError('todo')

        if self.out_mode == 'valid' and (self.dx, self.dy) != (1, 1):
            raise NotImplementedError(
                "ERROR: ConvOp.grad is now disabled for 'valid' convolutions with"
                " stride != (1, 1); call theano.tensor.nnet.conv2d() instead.")

        if self.dx not in (1, 2) or self.dy not in (1, 2):
            raise NotImplementedError(
                "ERROR: We disable ConvOp.grad now when output_mode is not"
                " 'valid' and dx or dy are greater than 2, as there is a bug"
                " in it. See `abstract_conv2d <>`_ for a version that support this.")

        all_shape = self.has_all_shape(self.imshp, self.kshp,
                                       self.nkern, self.bsize)

        if not all_shape and (self.dx != 1 or self.dy != 1):
            raise Exception("ConvOp.grad when dx!=1 or dy!=1 we must have all "
                            "the optional shape information")

        # Determine gradient on kernels ########
        assert inputs.ndim == 4 and kerns.ndim == 4

        # Swap batch and channel axes: the weight gradient is itself a
        # convolution over the batch dimension.
        newin = inputs.dimshuffle((1, 0, 2, 3))
        newgz = gz.dimshuffle((1, 0, 2, 3))

        # Pick which tensor plays the role of "image" and which plays
        # "filter" in the weight-gradient convolution; this differs
        # between the valid and full forward modes.
        if self.out_mode == 'valid':
            (img, filters) = (newin, newgz)
            kshp_logical = self.fulloutshp
            kshp_logical_top_aligned = False
            imshp_logical = None
            (bsize, nkern) = (self.imshp[0], self.nkern)
            imshp = (self.bsize, self.imshp[1], self.imshp[2])
            kshp = self.outshp
        elif self.out_mode == 'full':
            (img, filters) = (newgz, newin)
            kshp_logical = None
            kshp_logical_top_aligned = True
            imshp_logical = (self.bsize,
                             self.fulloutshp[0],
                             self.fulloutshp[1])
            (bsize, nkern) = (self.nkern, self.imshp[0])
            imshp = (self.bsize, self.outshp[0], self.outshp[1])
            kshp = self.imshp[1:]
        else:
            raise NotImplementedError(
                'Only [full,valid] modes are currently supported.')

        filters = filters[:, :, ::-1, ::-1]  # flip them

        # Weight gradient: always a 'valid' convolution with unit stride.
        dw = ConvOp(imshp, kshp, nkern, bsize, 1, 1, output_mode='valid',
                    unroll_batch=None, unroll_kern=None, unroll_patch=None,
                    imshp_logical=imshp_logical,
                    kshp_logical=kshp_logical,
                    kshp_logical_top_aligned=kshp_logical_top_aligned,
                    version=self.version,
                    direction_hint='bprop weights',
                    verbose=self.verbose)

        dw = dw(img, filters)

        if all_shape:
            assert all(o == k for o, k in zip(dw.owner.op.outshp, self.kshp))
        if self.out_mode == 'valid':
            # before DimShuffle, dw is of shape visdim x nkern x kshp[0] x kshp[1]
            dw = dw.dimshuffle((1, 0, 2, 3))
            dw = dw[:, :, ::-1, ::-1]

        # Determine gradient on inputs ########
        # The input gradient uses the opposite border mode of the forward
        # pass ('full' <-> 'valid').
        mode = 'valid'
        if not self.out_mode == 'full':
            mode = 'full'

        filters = kerns.dimshuffle((1, 0, 2, 3))
        filters = filters[:, :, ::-1, ::-1]

        nkern = self.imshp[0]
        imshp = (self.nkern, self.outshp[0], self.outshp[1])
        imshp_logical = (self.nkern, self.fulloutshp[0],
                         self.fulloutshp[1])

        din = ConvOp(imshp, self.kshp, nkern, self.bsize,
                     1, 1, output_mode=mode,
                     unroll_batch=None, unroll_kern=None,
                     unroll_patch=None,
                     imshp_logical=imshp_logical,
                     kshp_logical=None,
                     version=-1,  # we we change the mode, we don't forward the version.
                     direction_hint='bprop inputs',
                     verbose=self.verbose)

        din = din(gz, filters)

        assert all(o is None or o == i
                   for o, i in zip(din.owner.op.outshp, self.imshp[1:]))

        # din and dw should have the same broadcasting pattern as the
        # parameters they are the gradient of (resp. inputs and kerns).
        din = patternbroadcast(din, inputs.broadcastable)
        dw = patternbroadcast(dw, kerns.broadcastable)
        return [din, dw]

    def c_headers(self):
        """Return the C/C++ headers required by the generated code."""
        headers = ['<numpy/noprefix.h>']
        headers += ['<iostream>', '<sstream>']
        return headers

    def c_code_cache_version(self):
        """Cache key for the generated C code.

        Includes the OpenMP setting and the BLAS header version so cached
        modules are recompiled when either changes.
        """
        blas_version = blas.blas_header_version()
        return (15, self.openmp, blas_version)

    def c_support_code(self):
        return """
#define STRIDES(arr) (PyArray_STRIDES(arr))
#define FULL  2
#define SAME  1
#define VALID 0
#define MOD %
using namespace std;
""" + blas.blas_header_text()

    def use_blas(self):
        """ Return True if we will generate code that use gemm.
        """
        # The gemm-based code path only supports this configuration.
        # NOTE(review): dx/dy are compared against 0 here (not 1); this
        # mirrors the identical test in c_code() -- confirm intended.
        if self.out_mode != 'valid' or self.dx != 0 or self.dy != 0:
            return False
        # The faster gemm kernel cannot handle the logical-shape tricks
        # or any of the unrolled code generators.
        uses_special_case = (
            self.imshp != self.imshp_logical or
            self.kshp != self.kshp_logical or
            self.unroll_patch or
            self.unroll_batch > 0 or
            self.unroll_kern > 0)
        return not uses_special_case

    def c_libraries(self):
        """Libraries to link against: the BLAS ones when gemm is used."""
        if not self.use_blas():
            return []
        return blas.ldflags()

    def c_no_compile_args(self):
        """Compiler flags to strip from the compilation command.

        gcc 4.3.0 segfaults when compiling the generated code at -O3 for
        a (1, 1) kernel shape; -O2 works, so -O3 is removed in that case.
        """
        gcc_is_buggy = theano.gof.cmodule.gcc_version() in ['4.3.0']
        if gcc_is_buggy and self.kshp == (1, 1):
            return ['-O3']
        return []

    def c_compile_args(self):
        """Extra compiler flags: BLAS flags, gcc workaround, OpenMP flags."""
        flags = []
        if self.use_blas():
            flags = blas.ldflags(libs=False, flags=True)
        # gcc 4.3.0 segfaults at -O3 when the kernel shape is (1, 1);
        # force -O2 for that compiler/shape combination.
        if (theano.gof.cmodule.gcc_version() in ['4.3.0'] and
                self.kshp == (1, 1)):
            flags += ['-O2']
        # Append the -fopenmp flags provided by OpenMPOp.
        flags += super(ConvOp, self).c_compile_args()

        return flags

    def c_lib_dirs(self):
        """Library search paths: the BLAS ones when gemm is used."""
        if not self.use_blas():
            return []
        return blas.ldflags(libs=False, libs_dir=True)

    def c_header_dirs(self):
        """Header search paths: the BLAS ones when gemm is used."""
        if not self.use_blas():
            return []
        return blas.ldflags(libs=False, include_dir=True)

    def c_code(self, node, name, inp, out, sub):
        """Generate the C implementation for this apply node.

        Builds a substitution dict ``d`` holding the input/output variable
        names, shape constants (hard-coded where known, runtime lookups
        otherwise) and runtime shape-assertion snippets, then selects one
        of the C templates: the logical-shape version, an unrolled
        version, the gemm-based 'valid' version, or the generic template.
        """
        img2d, filtersflipped = inp
        z, = out
        # Only homogeneous float32/float64 inputs are supported.
        if node.inputs[0].type.dtype != node.inputs[1].type.dtype:
            raise NotImplementedError()
        assert node.inputs[0].type.dtype == node.inputs[1].type.dtype
        # locals() gives the template access to img2d/filtersflipped/z
        # names; sub provides 'fail' etc. from the compilation framework.
        d = locals()
        d.update(sub)

        all_shape = (self.has_all_shape(self.imshp, self.kshp,
                                        self.nkern, self.bsize) and
                     self.has_all_shape(self.imshp_logical, self.kshp_logical))

        d["self_out_mode"] = self.out_mode
        d["self_dx"] = self.dx
        d["self_dy"] = self.dy
        d["mode"] = self.out_mode.upper()
        d["affectation"] = "="

        # Default values, will be overrided if the shape info is provided
        d["self_bsize"] = "PyArray_DIMS(%(img2d)s)[0]" % d
        d["self_nkern"] = "PyArray_DIMS(%(filtersflipped)s)[0]" % d
        d["self_outshp0"] = "-1"
        d["self_outshp1"] = "-1"
        d["self_imshp0"] = "PyArray_DIMS(%(img2d)s)[1]" % d
        d["self_imshp1"] = "PyArray_DIMS(%(img2d)s)[2]" % d
        d["self_imshp2"] = "PyArray_DIMS(%(img2d)s)[3]" % d
        d["self_kshp0"] = "PyArray_DIMS(%(filtersflipped)s)[2]" % d
        d["self_kshp1"] = "PyArray_DIMS(%(filtersflipped)s)[3]" % d
        d["assert_size"] = ""

        # Override the default value if we have it
        # Each known build-time dimension becomes a C compile-time constant
        # plus a runtime C check that the actual array shape matches it.
        if self.kshp[0] is not None:
            expected = d["self_kshp0"]
            value = self.kshp[0]
            d["assert_size"] += """
if(%(value)s != %(expected)s){
    PyErr_Format(PyExc_ValueError,
            "The hardcoded shape for the number of rows in the filter "
            "(%%ld) isn't the run time shape (%%ld).",
            (long)%(value)s, (long)%(expected)s);
    %(fail)s;
}
            """ % dict(expected=expected, value=value, **sub)
            d["self_kshp0"] = self.kshp[0]
        if self.kshp[1] is not None:
            expected = d["self_kshp1"]
            value = self.kshp[1]
            d["assert_size"] += """
if(%(value)s != %(expected)s){
    PyErr_Format(PyExc_ValueError,
            "The hardcoded shape for the number of columns in the filter "
            "(%%ld) isn't the run time shape (%%ld).",
            (long)%(value)s, (long)%(expected)s);
    %(fail)s;
}
            """ % dict(expected=expected, value=value, **sub)
            d["self_kshp1"] = self.kshp[1]
        if self.outshp[0] is not None:
            expected = "dim_zz[0]"
            value = self.outshp[0]
            d["assert_size"] += """
if(%(value)s != %(expected)s){
    PyErr_Format(PyExc_ValueError,
            "The hardcoded shape for the number of rows in the output "
            "(%%ld) isn't the run time shape (%%ld).",
            (long)%(value)s, (long)%(expected)s);
    %(fail)s;
}
            """ % dict(expected=expected, value=value, **sub)
            d["self_outshp0"] = self.outshp[0]
        if self.outshp[1] is not None:
            expected = "dim_zz[1]"
            value = self.outshp[1]
            d["assert_size"] += """
if(%(value)s != %(expected)s){
    PyErr_Format(PyExc_ValueError,
            "The hardcoded shape for the number of columns in the output "
            "(%%ld) isn't the run time shape (%%ld).",
            (long)%(value)s, (long)%(expected)s);
    %(fail)s;
}
            """ % dict(expected=expected, value=value, **sub)
            d["self_outshp1"] = self.outshp[1]
        if self.imshp[0] is not None:
            # The stack size is checked twice: once against the image and
            # once against the kernel's second dimension.
            expected = d["self_imshp0"]
            value = self.imshp[0]
            d["assert_size"] += """
if(%(value)s != %(expected)s){
    PyErr_Format(PyExc_ValueError,
            "The hardcoded shape for the image stack size (%%ld) "
            "isn't the run time shape (%%ld).",
            (long)%(value)s, (long)%(expected)s);
    %(fail)s;
}
            """ % dict(expected=expected, value=value, **sub)
            expected = "kerns_dim[1]"
            value = self.imshp[0]
            d["assert_size"] += """
if(%(value)s != %(expected)s){
    PyErr_Format(PyExc_ValueError,
            "The hardcoded shape for the kernel stack size (%%ld) "
            "isn't the run time shape (%%ld).",
            (long)%(value)s, (long)%(expected)s);
    %(fail)s;
}
            """ % dict(expected=expected, value=value, **sub)
            d["self_imshp0"] = self.imshp[0]
        if self.imshp[1] is not None:
            expected = d["self_imshp1"]
            value = self.imshp[1]
            d["assert_size"] += """
if(%(value)s != %(expected)s){
    PyErr_Format(PyExc_ValueError,
            "The hardcoded shape for the number of rows in the image "
            "(%%ld) isn't the run time shape (%%ld).",
            (long)%(value)s, (long)%(expected)s);
    %(fail)s;
}
            """ % dict(expected=expected, value=value, **sub)
            d["self_imshp1"] = self.imshp[1]
        if self.imshp[2] is not None:
            expected = d["self_imshp2"]
            value = self.imshp[2]
            d["assert_size"] += """
if(%(value)s != %(expected)s){
    PyErr_Format(PyExc_ValueError,
            "The hardcoded shape for the number of columns in the image "
            "(%%ld) isn't the run time shape (%%ld).",
            (long)%(value)s, (long)%(expected)s);
    %(fail)s;
}
            """ % dict(expected=expected, value=value, **sub)
            d["self_imshp2"] = self.imshp[2]
        if self.bsize is not None:
            expected = d["self_bsize"]
            value = self.bsize
            d["assert_size"] += """
if(%(value)s != %(expected)s){
    PyErr_Format(PyExc_ValueError,
            "The hardcoded shape for the batch size (%%ld) "
            "isn't the run time shape (%%ld).",
            (long)%(value)s, (long)%(expected)s);
    %(fail)s;
}
            """ % dict(expected=expected, value=value, **sub)
            d["self_bsize"] = self.bsize
        if self.nkern is not None:
            expected = d["self_nkern"]
            value = self.nkern
            d["assert_size"] += """
if(%(value)s != %(expected)s){
    PyErr_Format(PyExc_ValueError,
            "The hardcoded shape for the number of kernels in the filter "
            "(%%ld) isn't the run time shape (%%ld).",
            (long)%(value)s, (long)%(expected)s);
    %(fail)s;
}
            """ % dict(expected=expected, value=value, **sub)
            d["self_nkern"] = self.nkern

        # Other hard coded stuff only if we have all shapes
        if all_shape:
            # Logical (upsampled) shapes and their strides relative to the
            # physical arrays.
            d["self_kshp_logical_r"] = self.kshp_logical[0]
            d["self_kshp_logical_c"] = self.kshp_logical[1]
            d["self_kshp_logical_stride_r"] = int(np.ceil(
                self.kshp_logical[0] / float(self.kshp[0])))
            d["self_kshp_logical_stride_c"] = int(np.ceil(
                self.kshp_logical[1] / float(self.kshp[1])))
            d["self_imshp_logical_r"] = self.imshp_logical[1]
            # numpy.B. 1  not 0
            d["self_imshp_logical_c"] = self.imshp_logical[2]
            # numpy.B. 2  not 1
            d["self_imshp_logical_stride_r"] = int(np.ceil(
                self.imshp_logical[1] / float(self.imshp[1])))
            d["self_imshp_logical_stride_c"] = int(np.ceil(
                self.imshp_logical[2] / float(self.imshp[2])))
            if not self.imshp[0] == 1:
                # More than one input channel: accumulate into the output
                # instead of overwriting it.
                d["affectation"] = "+="
            d["all_shape"] = "1"
            d["dim_zz_const"] = "const"
            d["dim_zz_affect"] = ""
        else:
            # Shapes only known at runtime: always accumulate, and compute
            # the output dimensions in C.
            d["affectation"] = "+="
            d["all_shape"] = "0"
            d["dim_zz_const"] = ""
            d["dim_zz_affect"] = """
  if (mode == FULL) {
    dim_zz[0] = (int)ceil((dim_im[0]+dim_ker0-1)/float(%(self_dx)s));
    dim_zz[1] = (int)ceil((dim_im[1]+dim_ker1-1)/float(%(self_dy)s));
  } else {
    dim_zz[0] = (int)ceil((dim_im[0]-dim_ker0+1)/float(%(self_dx)s));
    dim_zz[1] = (int)ceil((dim_im[1]-dim_ker1+1)/float(%(self_dy)s));
  }
""" % d
            d["assert_size"] += """
// Check the stack size of the filter and images are equals
if(kerns_dim[1] != img2d_dim[1]){
    PyErr_Format(PyExc_ValueError,
            "the filter stack size (%%ld) and image stack size (%%ld) differ",
            (long)kerns_dim[1], (long)img2d_dim[1]);
    %(fail)s;
}
            """ % sub

        # Offsets of the physical kernel inside the logical (upsampled)
        # kernel, depending on the alignment convention.
        if self.kshp_logical_top_aligned:
            d["self_kshp_logical_offset_r"] = 0
            d["self_kshp_logical_offset_c"] = 0
        elif all_shape:
            rstride = d["self_kshp_logical_stride_r"]
            cstride = d["self_kshp_logical_stride_c"]
            d["self_kshp_logical_offset_r"] = (self.kshp_logical[0] -
                                               (self.kshp[0] * rstride) -
                                               1 + rstride) % rstride
            d["self_kshp_logical_offset_c"] = (self.kshp_logical[1] -
                                               (self.kshp[1] * cstride) -
                                               1 + cstride) % cstride
            del rstride, cstride

        # C scalar type and matching BLAS gemm routine.
        if node.inputs[0].type.dtype == "float32":
            d["type"] = "float"
        elif node.inputs[0].type.dtype == "float64":
            d["type"] = "double"
        else:
            raise Exception("Type %s not implemented" %
                            node.inputs[0].type.dtype)
        d["gemm"] = 'dgemm_'
        if not d["type"] == "double":
            d["gemm"] = 'sgemm_'

        # Template selection: logical-shape, unrolled, gemm or generic.
        if self.imshp != self.imshp_logical or self.kshp != self.kshp_logical:
            if self.verbose:
                _logger.debug("return imshp!=imshp_logical or"
                              " self.kshp != self.kshp_logical shape version")
            return _conv_op_code_a % d

        if self.unroll_patch:
            if self.verbose:
                _logger.debug("return unroll patch version. all_shape=%s",
                              all_shape)
            return _conv_op_code_unroll_patch % d
        if ((self.unroll_batch is not None and self.unroll_batch > 0) or
                (self.unroll_kern is not None and self.unroll_kern > 0)):
            assert self.unroll_batch > 0
            assert self.unroll_kern > 0
            if self.verbose:
                _logger.debug("return unrolled batch (%s) and kern code (%s)",
                              str(self.unroll_batch), str(self.unroll_kern))
            return gen_conv_code_unroll_batch_kern(d, self.unroll_batch,
                                                   self.unroll_kern)

        # TODO: should we choose the unroll size automatically with the bigger divisor under 5?
        if self.out_mode == 'valid' and self.dx == 0 and self.dy == 0:
            if self.verbose:
                _logger.debug("return gemm version")
            return _conv_op_code_valid_gemm % d
        else:
            if self.verbose:
                _logger.debug("return no gemm version")
            return _conv_op_code_a % d


# C template for the generic (non-gemm) convolution implementation.
# Handles both FULL and VALID border modes as well as the logical-shape
# (upsampled image/kernel) machinery.  It is filled in by %-substitution
# from ConvOp.c_code(); %% sequences become literal percents in the
# emitted C, and MOD expands to the C modulo operator (see
# c_support_code).
# NOTE(review): the `*PyArray_DIMS(%(z)s)!=4` check below dereferences
# the first dimension; it looks like it intends `PyArray_NDIM` -- left
# untouched, confirm upstream before changing.
_conv_op_code_a = """
const int mode=%(mode)s;
int typenum=0, typenum_f=0;
PyArrayObject *ain1=NULL, *ain2=NULL;
PyArrayObject *filtersflipped_arr=NULL, *img2d_arr=NULL, *z_arr=NULL;
const %(type)s fill_value = 0;

int type_im=PyArray_TYPE(%(img2d)s);
int type_ker=PyArray_TYPE(%(filtersflipped)s);

npy_intp dim_zz[2]={%(self_outshp0)s,%(self_outshp1)s};
npy_intp dim_im_phys[2]={%(self_imshp1)s,%(self_imshp2)s};
npy_intp dim_im_log[2]={%(self_imshp_logical_r)s,%(self_imshp_logical_c)s};
npy_intp dim_ker_phys[2]={%(self_kshp0)s,%(self_kshp1)s};
npy_intp dim_ker_log[2]={%(self_kshp_logical_r)s,%(self_kshp_logical_c)s};

PyArray_Dims img2d_shape;
npy_intp img2d_dim[4]={1,1,0,0};
img2d_shape.ptr=img2d_dim;
img2d_shape.len=4;

PyArray_Dims kerns_shape;
npy_intp kerns_dim[4]={1,1,0,0};
kerns_shape.ptr=kerns_dim;
kerns_shape.len=4;
PyObject *img2d=NULL, *contig, *filtersflipped=NULL;


if(PyArray_NDIM(%(img2d)s)==2){
  img2d_dim[3]=PyArray_DIMS(%(img2d)s)[1];
  img2d_dim[2]=PyArray_DIMS(%(img2d)s)[0];
}else if(PyArray_NDIM(%(img2d)s)==3){
  img2d_dim[3]=PyArray_DIMS(%(img2d)s)[2];
  img2d_dim[2]=PyArray_DIMS(%(img2d)s)[1];
  img2d_dim[0]=PyArray_DIMS(%(img2d)s)[0];
}else if(PyArray_NDIM(%(img2d)s)==4){
  img2d_dim[3]=PyArray_DIMS(%(img2d)s)[3];
  img2d_dim[2]=PyArray_DIMS(%(img2d)s)[2];
  img2d_dim[1]=PyArray_DIMS(%(img2d)s)[1];
  img2d_dim[0]=PyArray_DIMS(%(img2d)s)[0];
}else {
    PyErr_SetString(PyExc_ValueError, "img don't have a good shape");
    %(fail)s;
}

if(PyArray_NDIM(%(filtersflipped)s)==3){
  kerns_dim[3]=PyArray_DIMS(%(filtersflipped)s)[2];
  kerns_dim[2]=PyArray_DIMS(%(filtersflipped)s)[1];
  kerns_dim[0]=PyArray_DIMS(%(filtersflipped)s)[0];
}else if(PyArray_NDIM(%(filtersflipped)s)==4){
  kerns_dim[3]=PyArray_DIMS(%(filtersflipped)s)[3];
  kerns_dim[2]=PyArray_DIMS(%(filtersflipped)s)[2];
  kerns_dim[1]=PyArray_DIMS(%(filtersflipped)s)[1];
  kerns_dim[0]=PyArray_DIMS(%(filtersflipped)s)[0];
}else{
    std::stringstream temp;
    temp << "nddim="<<PyArray_NDIM(%(filtersflipped)s);
    std::string param = temp.str();
    PyErr_SetString(PyExc_ValueError,
      ("kernel don't have a good shape. " + param).c_str());
    %(fail)s;
}

%(assert_size)s

img2d = PyArray_Newshape(%(img2d)s,&img2d_shape, NPY_CORDER);
img2d_arr = (PyArrayObject*)img2d;
if ((PyArray_STRIDES(img2d_arr)[3] != (npy_intp)sizeof(%(type)s))
     || (PyArray_STRIDES(img2d_arr)[2] != PyArray_DIMS(img2d_arr)[3]*(npy_intp)sizeof(%(type)s))){
    contig = (PyObject*)(PyArray_GETCONTIGUOUS((PyArrayObject*)img2d));
    Py_DECREF(img2d);
    img2d = contig;
    img2d_arr = (PyArrayObject*)img2d;
    if (!PyArray_ISCONTIGUOUS(img2d_arr)){
        PyErr_SetString(PyExc_ValueError, "img2d isn't contiguous");
        %(fail)s;
    }
}

filtersflipped = PyArray_Newshape(%(filtersflipped)s,&kerns_shape, NPY_CORDER);
filtersflipped_arr = (PyArrayObject*)filtersflipped;
if ((PyArray_STRIDES(filtersflipped_arr)[3] != (npy_intp)sizeof(%(type)s))
     || (PyArray_STRIDES(filtersflipped_arr)[2] != PyArray_DIMS(filtersflipped_arr)[3]*(npy_intp)sizeof(%(type)s))){
    contig = (PyObject*)(PyArray_GETCONTIGUOUS((PyArrayObject*)filtersflipped));
    Py_DECREF(filtersflipped);
    filtersflipped = contig;
    filtersflipped_arr = (PyArrayObject*)filtersflipped;
    if (!PyArray_ISCONTIGUOUS(filtersflipped_arr)){
        PyErr_SetString(PyExc_ValueError, "filtersflipped isn't contiguous");
        %(fail)s;
    }
}

if(mode != VALID && mode != FULL){
  PyErr_SetString(PyExc_ValueError,
                  "invalid mode, only full and valid are supported");
  %(fail)s;
}
typenum = PyArray_ObjectType((PyObject*)%(img2d)s, 0);
typenum_f = PyArray_ObjectType((PyObject*)%(filtersflipped)s, 0);
if (typenum < 0) {PyErr_SetString(PyExc_ValueError, "Invalid type"); %(fail)s;}
if (typenum != typenum_f) {
  PyErr_SetString(PyExc_ValueError, "Input types must match");
  %(fail)s;
}

if (!img2d)
{
    PyErr_SetString(PyExc_AssertionError, "!img2d");
    %(fail)s;
}
if (!filtersflipped)
{
    PyErr_SetString(PyExc_AssertionError, "!filtersflipped");
    %(fail)s;
}

if ((!%(z)s)
  || *PyArray_DIMS(%(z)s)!=4
  ||(PyArray_DIMS(%(z)s)[0] != %(self_bsize)s)
  ||(PyArray_DIMS(%(z)s)[1] != %(self_nkern)s)
  ||(PyArray_DIMS(%(z)s)[2] != dim_zz[0])
  ||(PyArray_DIMS(%(z)s)[3] != dim_zz[1])
  ||!PyArray_ISCONTIGUOUS(%(z)s)
  )
{
  {Py_XDECREF(%(z)s);}
  npy_intp dims[4] = {0,0,0,0};
  dims[0]=%(self_bsize)s;
  dims[1]=%(self_nkern)s;
  dims[2]=dim_zz[0];
  dims[3]=dim_zz[1];
  %(z)s = (PyArrayObject*) PyArray_ZEROS(4, dims, typenum,0);
}else{
  //PyArray_FILLWBYTE((PyObject*)%(z)s,0);
}
z_arr = (PyArrayObject*) %(z)s;

int Os[2];
Os[0]=%(self_outshp0)s;
Os[1]=%(self_outshp1)s;

//assertions
if (!PyArray_ISCONTIGUOUS(%(z)s))
{
    PyErr_SetString(PyExc_AssertionError, "Output (%(z)s) not contiguous");
    %(fail)s;
}

for(int b=0;b< %(self_bsize)s;b++){
  for(int n_kern=0;n_kern<%(self_nkern)s;n_kern++){

    %(type)s * __restrict__ out=(%(type)s *)(PyArray_GETPTR2(z_arr,b,n_kern));
    for (int i = 0; i < dim_zz[0]*dim_zz[1]; ++i) out[i] = 0;

    for(int stack_size=0;stack_size<%(self_imshp0)s;stack_size++){

      const %(type)s * __restrict__ in=(%(type)s *)(PyArray_GETPTR2(img2d_arr,b,stack_size));
      const %(type)s * __restrict__ hvals=(%(type)s *)(PyArray_GETPTR2(filtersflipped_arr,n_kern,stack_size));


      for (int iter_m=0; iter_m < Os[0]; iter_m++) {
        // Reposition index into input image based on requested output size
        //row position in logical output image
        int pos_m = iter_m*%(self_dx)s;
        //row anchor in logical input image (we will loop upward from here)
        int new_m;
        if (mode == FULL) new_m = pos_m ;
        else new_m = (pos_m+dim_ker_log[0]-1);

        for (int iter_n=0; iter_n < Os[1]; iter_n++) {  // loop over columns
          // current col position in logical output image
          int pos_n=iter_n*%(self_dy)s;
          %(type)s sum=0;

          // Sum over kernel, if index into image is out of bounds
          // fill with the value
          // loop over logical rows in kernel
          for (int j_log=0; j_log < %(self_kshp_logical_r)s; j_log++) {
            // ind0_log: row position in logical input image
            int ind0_log = (new_m-j_log);

            if ((j_log < %(self_kshp_logical_offset_r)s) ||
                (j_log - %(self_kshp_logical_offset_r)s) MOD %(self_kshp_logical_stride_r)s)
                continue;

            if (ind0_log MOD %(self_imshp_logical_stride_r)s)
                continue;

            int j_phys = ((j_log- %(self_kshp_logical_offset_r)s) /
                          %(self_kshp_logical_stride_r)s);
            int ind0_phys = (ind0_log / %(self_imshp_logical_stride_r)s);
            //std::cerr <<"j_log" << j_log << " j_phys " << j_phys << " " << ind0_phys << "\\n";

            if(mode==FULL){
              //This is a pointer to the current row of the kernel
              const %(type)s * idx_hvals=&hvals[j_phys*dim_ker_phys[1]];
              if(ind0_log < 0 || ind0_log >= dim_im_log[0]){
                   // the current row of the kernel is off the image
              }else{
                int k = max((int)(pos_n-dim_im_log[1])+1,0);
                int max_k=min(pos_n+1,(int)dim_ker_log[1]);
                const %(type)s * idx_in=&in[ind0_phys*dim_im_phys[1]];
                for (int ind1_log=pos_n-k; k<max_k; k++,ind1_log--) {
                    if (1)
                    {
                                if ((k < %(self_kshp_logical_offset_c)s) ||
                                    (k - %(self_kshp_logical_offset_c)s) MOD
                                    %(self_kshp_logical_stride_c)s)
                                    continue;

                                if (ind1_log MOD
                                    %(self_imshp_logical_stride_c)s)
                                    continue;
                    }
                  sum += idx_hvals[(k-%(self_kshp_logical_offset_c)s) /
                                   %(self_kshp_logical_stride_c)s] *
                            idx_in[ind1_log / %(self_imshp_logical_stride_c)s];
                }
              }
            }else{ // mode==VALID
              //JB: should be dim_im[1] right? (was dim_im[0])
              const %(type)s* idx_in=&in[ind0_phys*dim_im_phys[1]];
              const %(type)s* idx_hvals=&hvals[j_phys*dim_ker_phys[1]];
              int new_n = (pos_n+dim_ker_log[1]-1);
              if (%(self_imshp_logical_stride_c)s != 1)  // a general loop
              {
                  for (int k=0,last=new_n; k < dim_ker_log[1]; k++,last--) {
                        if ((k < %(self_kshp_logical_offset_c)s) ||
                            (k - %(self_kshp_logical_offset_c)s) MOD
                            %(self_kshp_logical_stride_c)s)
                            continue;

                        else if (last MOD %(self_imshp_logical_stride_c)s)
                            continue;
                            else
                            {
                    sum+=idx_hvals[(k-%(self_kshp_logical_offset_c)s) /
                                   %(self_kshp_logical_stride_c)s] *
                             idx_in[last/%(self_imshp_logical_stride_c)s];
                    }
                  }
              }
              else  // self_imshp_stride_c == 1
              {
                  int offset = %(self_kshp_logical_offset_c)s;
                  int k_phys=0;
                  for (int k_log=offset,last=new_n-offset;
                       k_log < dim_ker_log[1]; ) {
                    sum += idx_hvals[k_phys]*idx_in[last];
                    ++k_phys;
                    last -= %(self_kshp_logical_stride_c)s;
                    k_log += %(self_kshp_logical_stride_c)s;
                  }
              }
            }
          }//for j_log
          out[iter_m*dim_zz[1]+iter_n] %(affectation)s sum;
        }//for iter_n
      }//for iter_m
    }//for stack_size
    if (0 && (mode==FULL)){
      for (int i = 0; i < dim_zz[0]*dim_zz[1]; ++i)
        std::cout << " " << out[i];
      std::cout << "\\n";
    }
  }//for n_kern
}//for b
Py_XDECREF(img2d);
Py_XDECREF(filtersflipped);
"""


#########
# ConvOp c_code for valid mode (uses gemm)
#########

_conv_op_code_valid_gemm = """
int typenum=0, typenum_f=0;
PyArrayObject *ain1=NULL, *ain2=NULL, *img2d_arr=NULL, *z_arr=NULL;
const int NKERN = %(self_nkern)s;

int type_im=PyArray_TYPE(%(img2d)s);
int type_ker=PyArray_TYPE(%(filtersflipped)s);

npy_intp dim_zz[2]={%(self_outshp0)s,%(self_outshp1)s};
npy_intp dim_im[2]={%(self_imshp1)s,%(self_imshp2)s};
const npy_intp dim_ker0=%(self_kshp0)s;
const npy_intp dim_ker1=%(self_kshp1)s;

PyArray_Dims img2d_shape;
npy_intp img2d_dim[4]={1,1,0,0};
img2d_shape.ptr=img2d_dim;
img2d_shape.len=4;

PyArray_Dims kerns_shape;
npy_intp kerns_dim[4]={1,1,0,0};
kerns_shape.ptr=kerns_dim;
kerns_shape.len=4;
PyObject *img2d=NULL, *contig;

if(PyArray_NDIM(%(img2d)s)==2){
  img2d_dim[3]=PyArray_DIMS(%(img2d)s)[1];
  img2d_dim[2]=PyArray_DIMS(%(img2d)s)[0];
}else if(PyArray_NDIM(%(img2d)s)==3){
  img2d_dim[3]=PyArray_DIMS(%(img2d)s)[2];
  img2d_dim[2]=PyArray_DIMS(%(img2d)s)[1];
  img2d_dim[0]=PyArray_DIMS(%(img2d)s)[0];
}else if(PyArray_NDIM(%(img2d)s)==4){
  img2d_dim[3]=PyArray_DIMS(%(img2d)s)[3];
  img2d_dim[2]=PyArray_DIMS(%(img2d)s)[2];
  img2d_dim[1]=PyArray_DIMS(%(img2d)s)[1];
  img2d_dim[0]=PyArray_DIMS(%(img2d)s)[0];
}else {
    PyErr_SetString(PyExc_ValueError, "img don't have a good shape");
    %(fail)s;
}

if(PyArray_NDIM(%(filtersflipped)s)==3){
  kerns_dim[3]=PyArray_DIMS(%(filtersflipped)s)[2];
  kerns_dim[2]=PyArray_DIMS(%(filtersflipped)s)[1];
  kerns_dim[0]=PyArray_DIMS(%(filtersflipped)s)[0];
}else if(PyArray_NDIM(%(filtersflipped)s)==4){
  kerns_dim[3]=PyArray_DIMS(%(filtersflipped)s)[3];
  kerns_dim[2]=PyArray_DIMS(%(filtersflipped)s)[2];
  kerns_dim[1]=PyArray_DIMS(%(filtersflipped)s)[1];
  kerns_dim[0]=PyArray_DIMS(%(filtersflipped)s)[0];
}else{
    std::stringstream temp;
    temp << "nddim="<<PyArray_NDIM(%(filtersflipped)s);
    std::string param = temp.str();
    PyErr_SetString(PyExc_ValueError,
      ("kernel don't have a good shape. " + param).c_str());
    %(fail)s;
}
if (NKERN != kerns_dim[0])
{
    PyErr_SetString(PyExc_NotImplementedError, "nonsense nkern");
    %(fail)s;
}

img2d = PyArray_Newshape(%(img2d)s,&img2d_shape, NPY_CORDER);
img2d_arr = (PyArrayObject*)img2d;
if ((PyArray_STRIDES(img2d_arr)[3] != (npy_intp)sizeof(%(type)s))
     || (PyArray_STRIDES(img2d_arr)[2] != PyArray_DIMS(img2d_arr)[3]*(npy_intp)sizeof(%(type)s))){
    contig = (PyObject*)(PyArray_GETCONTIGUOUS((PyArrayObject*)img2d));
    Py_DECREF(img2d);
    img2d = contig;
    img2d_arr = (PyArrayObject*)img2d;
    if (!PyArray_ISCONTIGUOUS(img2d_arr)){
        PyErr_SetString(PyExc_ValueError, "img2d isn't contiguous");
        %(fail)s;
    }
}

typenum = PyArray_ObjectType((PyObject*)%(img2d)s, 0);
typenum_f = PyArray_ObjectType((PyObject*)%(filtersflipped)s, 0);
if (typenum < 0) {PyErr_SetString(PyExc_ValueError, "Invalid type"); %(fail)s;}
if (typenum != typenum_f) {PyErr_SetString(PyExc_ValueError, "Input types must match"); %(fail)s;}

if (!img2d) {
    PyErr_SetString(PyExc_ValueError, "Null argument img2d");
    %(fail)s;
}
if ((!%(z)s)
  || *PyArray_DIMS(%(z)s)!=4
  ||(PyArray_DIMS(%(z)s)[0] != %(self_bsize)s)
  ||(PyArray_DIMS(%(z)s)[1] != %(self_nkern)s)
  ||(PyArray_DIMS(%(z)s)[2] != dim_zz[0])
  || (PyArray_DIMS(%(z)s)[3] != dim_zz[1])
  )
{
  {Py_XDECREF(%(z)s);}
  npy_intp dims[4] = {0,0,0,0};
  dims[0]=%(self_bsize)s;
  dims[1]=%(self_nkern)s;
  dims[2]=dim_zz[0];
  dims[3]=dim_zz[1];
  %(z)s = (PyArrayObject*) PyArray_ZEROS(4, dims, typenum,0);
}else{
  PyArray_FILLWBYTE((PyObject*)%(z)s,0);
}
z_arr = (PyArrayObject*) %(z)s;

%(assert_size)s

int Os[2];
Os[0] = dim_im[0]-dim_ker0+1;
Os[1] = dim_im[1]-dim_ker1+1;

// allocate a temporary buffer for storing the inner product of each nth kernel row
// with each row of an image
{
%(type)s * kbuf = (%(type)s *)malloc((Os[0] * NKERN + PyArray_Size((PyObject*)%(filtersflipped)s))* (npy_intp)sizeof(%(type)s));
int kbufstride = NKERN;
%(type)s * myfilters = kbuf + Os[0] * NKERN;

//copy out filtersflipped into filters un-flipped format
//std::cerr << "__filling myfilters__\\n";
for(int i=0;i < kerns_dim[0];++i){
    for(int j=0;j < kerns_dim[1];++j){
        for(int k=0;k < kerns_dim[2];++k){
            for(int l=0;l < kerns_dim[3];++l){
                %(type)s * ff = ((PyArray_NDIM(%(filtersflipped)s)) == 3)
                    ? (%(type)s *)PyArray_GETPTR3(%(filtersflipped)s, i, kerns_dim[2]-1-k, kerns_dim[3]-1-l)
                    : (%(type)s *)PyArray_GETPTR4(%(filtersflipped)s, i, j, kerns_dim[2]-1-k, kerns_dim[3]-1-l);
                myfilters[i * (kerns_dim[1]*kerns_dim[2]*kerns_dim[3])
                          + j * (kerns_dim[2]*kerns_dim[3])
                          + k * (kerns_dim[3])
                          + l] = ff[0];
                //std::cerr << " " << ff[0];
            }
            //std::cerr << "\\n";
        }
        //std::cerr << "(end of stack/batch " <<j << "/" << i << "  ) \\n";
    }
}

//std::cerr << "-----new loop ----\\n";
for(int b=0;b< %(self_bsize)s;b++){
    for (int img_col = 0; img_col < Os[1]; ++img_col){
        for (int filter_row = 0; filter_row < kerns_dim[2]; ++filter_row){
            for (int stackidx = 0; stackidx < %(self_imshp0)s; ++stackidx){
                %(type)s * img_colview =
                    (%(type)s *)(PyArray_GETPTR4(img2d, b, stackidx, filter_row, img_col));
                %(type)s * filter_rows = myfilters + stackidx * (kerns_dim[2]*kerns_dim[3]) +
                filter_row * kerns_dim[3];
                //std::cerr << "filterview offset: " << filter_rows - myfilters << "\\n";

                char N = 'N'; char T = 'T';
                int Nz0 = Os[0];
                int Nz1 = NKERN;
                int K = kerns_dim[3];
                %(type)s alpha = 1.0;
                %(type)s beta = stackidx ? 1.0 : 0.0;
                int imgview_stride = dim_im[1];
                int filter_rows_stride =kerns_dim[1]*kerns_dim[2]*kerns_dim[3];
                //remember, Fortran wants a column-major interpretation
                assert(PyArray_STRIDES(img2d)[3] == (npy_intp)sizeof(%(type)s));

                if (0){
                    std::cerr << "b " << b << " img_col " << img_col << " filterrow " << filter_row << " stackidx " <<stackidx << "\\n";
                    std::cerr << "colview (physical layout) stride: " << imgview_stride << "\\n";
                    for (int ii = 0; ii < Nz0; ++ii){
                        for (int jj = 0; jj < K; ++jj){
                            std::cerr << " " << img_colview[ii * imgview_stride + jj];
                        }
                        std::cerr << "\\n";
                    }
                    std::cerr << "filterview ("<<filter_row<<"'th rows) stride: " << filter_rows_stride << "\\n";
                    for (int ii = 0; ii < Nz1; ++ii){
                        for (int jj = 0; jj < K; ++jj){
                            std::cerr << " " << filter_rows[ii * filter_rows_stride + jj];
                        }
                        std::cerr << "\\n";
                    }

                    std::cerr << Nz1 << " " << Nz0 << " " << K << "\\n" ;
                }

                %(gemm)s(&T, &N,
                    &Nz1, &Nz0, &K,
                    &alpha,
                    filter_rows, &filter_rows_stride,
                    img_colview, &imgview_stride,
                    &beta, kbuf, &kbufstride);

                if (0){
                    std::cerr << "z (logical layout) beta" << beta << "\\n";
                    for (int ii = 0; ii < Nz0; ++ii){
                        for (int jj = 0; jj < Nz1; ++jj){
                            std::cerr << " " << kbuf[ii * kbufstride + jj];
                        }
                        std::cerr << "\\n";
                    }
                }
            }
            // now kbuf the sum over the stack, put it into the outbuf
            for (int img_row = 0; img_row < Os[0]; ++img_row) {
                for (int kernel_idx = 0; kernel_idx < NKERN; ++kernel_idx) {
                    %(type)s * z_p =  (%(type)s *)PyArray_GETPTR4(%(z)s, b, kernel_idx, img_row, img_col);
                    if (0)
                    {
                        if (b >= PyArray_DIMS(%(z)s)[0]) %(fail)s;
                        if (kernel_idx >= PyArray_DIMS(%(z)s)[1]) %(fail)s;
                        if (img_row >= PyArray_DIMS(%(z)s)[2]) %(fail)s;
                        if (img_col >= PyArray_DIMS(%(z)s)[3]) %(fail)s;
                    }
                    z_p[0] += kbuf[img_row * kbufstride + kernel_idx];
                }
            }
        }
    }
}
free(kbuf);
}
Py_XDECREF(img2d);
"""


def gen_conv_code_unroll_batch_kern(d, unroll_bsize=1, unroll_ksize=1):
    """
    Generate ConvOp c_code with the batch and kernel loops unrolled.

    Parameters
    ----------
    d : dict
        Substitution dictionary for the C template; must provide the
        %(...)s keys the template references ('mode', 'type', 'img2d',
        'filtersflipped', 'z', 'fail', 'assert_size', 'affectation' and
        the self_* shape/stride constants).  The caller's dictionary is
        left untouched; a copy is used internally.
    unroll_bsize : int
        Number of batch iterations handled per unrolled loop body (> 0).
    unroll_ksize : int
        Number of kernel iterations handled per unrolled loop body (> 0).

    Returns
    -------
    str
        The generated C code.

    Raises
    ------
    ValueError
        If `d` already defines one of the keys this generator injects
        into the substitution dictionary.

    """
    assert unroll_bsize > 0 and unroll_ksize > 0
    # Keys this function needs to write into the substitution dict.
    reserved = ("unroll_bsize", "unroll_ksize", "unroll_iter",
                "unroll_biter", "unroll_kiter")
    if any(key in d for key in reserved):
        # ValueError subclasses Exception, so callers catching the old
        # generic Exception still work.
        raise ValueError("We can't use this dictionary as we would"
                         " overwrite some of its content")
    d = d.copy()

    d["unroll_bsize"] = unroll_bsize
    d["unroll_ksize"] = unroll_ksize

    def my_dup(st, size):
        # Emit `st` once per unrolled iteration, 0 <= unroll_iter < size.
        s = ""
        for i in range(size):
            d["unroll_iter"] = i
            s += st % d
        return s + "\n"

    def my_dup2(st):
        # Emit `st` once per (batch, kernel) unrolled pair; unroll_iter
        # is the flattened index of the pair.
        s = ""
        count = 0
        for i in range(unroll_bsize):
            d["unroll_biter"] = i
            for j in range(unroll_ksize):
                d["unroll_kiter"] = j
                d["unroll_iter"] = count
                count += 1
                s += st % d
        return s + "\n"
    ret = """
const int mode=%(mode)s;
int typenum=0, typenum_f=0;
PyArrayObject *ain1=NULL, *ain2=NULL, *filtersflipped_arr=NULL, *img2d_arr=NULL, *z_arr=NULL;;
const %(type)s fill_value = 0;

int type_im=PyArray_TYPE(%(img2d)s);
int type_ker=PyArray_TYPE(%(filtersflipped)s);

npy_intp dim_zz[2]={%(self_outshp0)s,%(self_outshp1)s};
npy_intp dim_im[2]={%(self_imshp1)s,%(self_imshp2)s};
const npy_intp dim_ker0=%(self_kshp0)s;
const npy_intp dim_ker1=%(self_kshp1)s;

PyArray_Dims img2d_shape;
npy_intp img2d_dim[4]={1,1,0,0};
img2d_shape.ptr=img2d_dim;
img2d_shape.len=4;

PyArray_Dims kerns_shape;
npy_intp kerns_dim[4]={1,1,0,0};
kerns_shape.ptr=kerns_dim;
kerns_shape.len=4;
PyObject *img2d=NULL, *contig, *filtersflipped=NULL;

if(PyArray_NDIM(%(img2d)s)==2){
  img2d_dim[3]=PyArray_DIMS(%(img2d)s)[1];
  img2d_dim[2]=PyArray_DIMS(%(img2d)s)[0];
}else if(PyArray_NDIM(%(img2d)s)==3){
  img2d_dim[3]=PyArray_DIMS(%(img2d)s)[2];
  img2d_dim[2]=PyArray_DIMS(%(img2d)s)[1];
  img2d_dim[0]=PyArray_DIMS(%(img2d)s)[0];
}else if(PyArray_NDIM(%(img2d)s)==4){
  img2d_dim[3]=PyArray_DIMS(%(img2d)s)[3];
  img2d_dim[2]=PyArray_DIMS(%(img2d)s)[2];
  img2d_dim[1]=PyArray_DIMS(%(img2d)s)[1];
  img2d_dim[0]=PyArray_DIMS(%(img2d)s)[0];
}else {
    std::stringstream temp;
    temp << "nddim="<<PyArray_NDIM(%(img2d)s);
    std::string param = temp.str();
    PyErr_SetString(PyExc_ValueError,
      ("img don't have a good shape. " + param).c_str());
    %(fail)s;
}

if(PyArray_NDIM(%(filtersflipped)s)==3){
  kerns_dim[3]=PyArray_DIMS(%(filtersflipped)s)[2];
  kerns_dim[2]=PyArray_DIMS(%(filtersflipped)s)[1];
  kerns_dim[0]=PyArray_DIMS(%(filtersflipped)s)[0];
}else if(PyArray_NDIM(%(filtersflipped)s)==4){
  kerns_dim[3]=PyArray_DIMS(%(filtersflipped)s)[3];
  kerns_dim[2]=PyArray_DIMS(%(filtersflipped)s)[2];
  kerns_dim[1]=PyArray_DIMS(%(filtersflipped)s)[1];
  kerns_dim[0]=PyArray_DIMS(%(filtersflipped)s)[0];
}else{
    PyErr_SetString(PyExc_ValueError, "kernel don't have a good shape");
    %(fail)s;
}

%(assert_size)s

img2d = PyArray_Newshape(%(img2d)s,&img2d_shape, NPY_CORDER);
if (!img2d)
{
    PyErr_SetString(PyExc_AssertionError, "!img2d");
    %(fail)s;
}
img2d_arr = (PyArrayObject*)img2d;
if ((PyArray_STRIDES(img2d_arr)[3] != (npy_intp)sizeof(%(type)s))
     || (PyArray_STRIDES(img2d_arr)[2] != PyArray_DIMS(img2d_arr)[3]*(npy_intp)sizeof(%(type)s))){
    contig = (PyObject*)(PyArray_GETCONTIGUOUS((PyArrayObject*)img2d));
    Py_DECREF(img2d);
    img2d = contig;
    img2d_arr = (PyArrayObject*)img2d;
    if (!PyArray_ISCONTIGUOUS(img2d_arr)){
        PyErr_SetString(PyExc_ValueError, "img2d isn't contiguous");
        %(fail)s;
    }
}

filtersflipped = PyArray_Newshape(%(filtersflipped)s,&kerns_shape, NPY_CORDER);
if (!filtersflipped)
{
    PyErr_SetString(PyExc_AssertionError, "!filtersflipped");
    %(fail)s;
}
filtersflipped_arr = (PyArrayObject*)filtersflipped;
if ((PyArray_STRIDES(filtersflipped_arr)[3] != (npy_intp)sizeof(%(type)s))
     || (PyArray_STRIDES(filtersflipped_arr)[2] != PyArray_DIMS(filtersflipped_arr)[3]*(npy_intp)sizeof(%(type)s))){
    contig = (PyObject*)(PyArray_GETCONTIGUOUS((PyArrayObject*)filtersflipped));
    Py_DECREF(filtersflipped);
    filtersflipped = contig;
    filtersflipped_arr = (PyArrayObject*)filtersflipped;
    if (!PyArray_ISCONTIGUOUS(filtersflipped_arr)){
        PyErr_SetString(PyExc_ValueError, "filtersflipped isn't contiguous");
        %(fail)s;
    }
}

if(mode != VALID && mode != FULL){
  PyErr_SetString(PyExc_ValueError, "invalid mode, only full and valid are supported"); %(fail)s;
}
typenum = PyArray_ObjectType((PyObject*)%(img2d)s, 0);
typenum_f = PyArray_ObjectType((PyObject*)%(filtersflipped)s, 0);
if (typenum < 0) {PyErr_SetString(PyExc_ValueError, "Invalid type"); %(fail)s;}
if (typenum != typenum_f) {PyErr_SetString(PyExc_ValueError, "Input types must match"); %(fail)s;}

if ((!%(z)s)
  || PyArray_NDIM(%(z)s)!=4
  ||(PyArray_DIMS(%(z)s)[0] != %(self_bsize)s)
  ||(PyArray_DIMS(%(z)s)[1] != %(self_nkern)s)
  ||(PyArray_DIMS(%(z)s)[2] != dim_zz[0])
  ||(PyArray_DIMS(%(z)s)[3] != dim_zz[1])
  ||!PyArray_ISCONTIGUOUS(%(z)s)
  )
{
  {Py_XDECREF(%(z)s);}
  npy_intp dims[4] = {0,0,0,0};
  dims[0]=%(self_bsize)s;
  dims[1]=%(self_nkern)s;
  dims[2]=dim_zz[0];
  dims[3]=dim_zz[1];
  %(z)s = (PyArrayObject*) PyArray_ZEROS(4, dims, typenum,0);
}else{
  //PyArray_FILLWBYTE((PyObject*)%(z)s,0);
}
z_arr = (PyArrayObject*) %(z)s;

int Os[2];
Os[0]=%(self_outshp0)s;
Os[1]=%(self_outshp1)s;

//assertions
if (!PyArray_ISCONTIGUOUS(%(z)s))
{
    PyErr_SetString(PyExc_AssertionError, "Output (%(z)s) not contiguous");
    %(fail)s;
}

for(int b=0;b< %(self_bsize)s ;b+=%(unroll_bsize)s){
  for(int n_kern=0;n_kern<%(self_nkern)s;n_kern+=%(unroll_ksize)s){

""" % d
    ret += my_dup2("%(type)s * __restrict__ out%(unroll_iter)s=(%(type)s *)(PyArray_GETPTR2(z_arr,b+%(unroll_biter)s,n_kern+%(unroll_kiter)s));")
    ret += my_dup("for (int i = 0; i < dim_zz[0]*dim_zz[1]; ++i) out%(unroll_iter)s[i] = 0;", unroll_bsize * unroll_ksize)
    ret += """
    for(int stack_size=0;stack_size<%(self_imshp0)s;stack_size++){
""" % d
    ret += my_dup("const %(type)s * __restrict__ in%(unroll_iter)d=(%(type)s *)(PyArray_GETPTR2(img2d_arr,b+%(unroll_iter)s,stack_size));", unroll_bsize)
    ret += my_dup("const %(type)s * __restrict__ hvals%(unroll_iter)s=(%(type)s *)(PyArray_GETPTR2(filtersflipped_arr,n_kern+%(unroll_iter)s,stack_size));", unroll_ksize)
    ret += """

      int new_m;

      for (int iter_m=0; iter_m < Os[0]; iter_m++) {
        // Reposition index into input image based on requested output size
        int pos_m = iter_m*%(self_dx)s;//The position of the patch in the image
        if (mode == FULL) new_m = pos_m ;
        else new_m = (pos_m+dim_ker0-1);

        for (int iter_n=0; iter_n < Os[1]; iter_n++) {  // loop over columns
          int pos_n=iter_n*%(self_dy)s;
        """ % d
    ret += my_dup(
        "%(type)s sum%(unroll_iter)s=0;", unroll_bsize * unroll_ksize)
    ret += """

          // Sum over kernel, if index into image is out of bounds
          // fill with the value
          for (int j=0; j < dim_ker0; j++) {
            int ind0 = (new_m-j);

            if(mode==FULL){
""" % d
    ret += my_dup("const %(type)s * idx_hvals%(unroll_iter)s=&hvals%(unroll_iter)s[j*dim_ker1];", unroll_ksize)
    ret += """
              if(ind0 < 0 || ind0 >= dim_im[0]){
                if(fill_value!=0)
                  for (int k=0; k < dim_ker1; k++) {
""" % d
    ret += my_dup2("sum%(unroll_iter)s += idx_hvals%(unroll_kiter)s[k] * fill_value;")
    ret += """
                  }
              }else{
                //do the part where kernel is to the right of the img

                int k=0,max_k=max((int)(pos_n-dim_im[1])+1,0);
                if(fill_value!=0){

                  for(k=0;k<max_k;k++){
""" % d
    ret += my_dup2("sum%(unroll_iter)s += idx_hvals%(unroll_kiter)s[k] * fill_value;")
    ret += """
                  }
                }else {k=max_k;}

                //do the part where the kernel is on the img
                max_k=min(pos_n+1,(int)dim_ker1);
""" % d
    ret += my_dup("const %(type)s * idx_in%(unroll_iter)s=&in%(unroll_iter)s[ind0*dim_im[1]];", unroll_bsize)
    ret += """
                for (int ind1=pos_n-k; k<max_k; k++,ind1--) {

""" % d
    ret += my_dup2("sum%(unroll_iter)s+= idx_hvals%(unroll_kiter)s[k] * idx_in%(unroll_biter)s[ind1];")
    ret += """
                }
                //do the part to the left of the img
                if(fill_value!=0)
                  for(;k<dim_ker1;k++){
""" % d
    ret += my_dup2("sum%(unroll_iter)s += idx_hvals%(unroll_kiter)s[k] * fill_value;")
    ret += """
                  }
              }
            }else{//valid mode
""" % d
    ret += my_dup("const %(type)s* idx_in%(unroll_iter)s=&in%(unroll_iter)s[ind0*dim_im[1]];", unroll_bsize)
    ret += my_dup("const %(type)s* idx_hvals%(unroll_iter)s=&hvals%(unroll_iter)s[j*dim_ker1];", unroll_ksize)
    ret += """
              int new_n = (pos_n+dim_ker1-1);

              for (int k=0,last=new_n; k < dim_ker1; k++,last--) {
""" % d
    ret += my_dup2("sum%(unroll_iter)s+=idx_hvals%(unroll_kiter)s[k]*idx_in%(unroll_biter)s[last];")
    ret += """
              }
            }

          }//for j
""" % d
    ret += my_dup("out%(unroll_iter)s[iter_m*dim_zz[1]+iter_n] %(affectation)s sum%(unroll_iter)s;", unroll_bsize * unroll_ksize)
    ret += """
        }//for n
      }//for m
    }//for stack_size
  }//for n_kern
}//for b
Py_XDECREF(img2d);
Py_XDECREF(filtersflipped);
"""
    return ret

_conv_op_code_unroll_patch = """
const int mode=%(mode)s;
int typenum=0, typenum_f=0;
PyArrayObject *ain1=NULL, *ain2=NULL, *filtersflipped_arr=NULL, *img2d_arr=NULL, *z_arr=NULL;
const %(type)s fill_value = 0;//only value of 0 are currently tested and correctly implemented

int type_im=PyArray_TYPE(%(img2d)s);
int type_ker=PyArray_TYPE(%(filtersflipped)s);

const npy_intp dim_im[2]={%(self_imshp1)s,%(self_imshp2)s};
//The following line caused gcc 4.3.0 20080428 (Red Hat 4.3.0-8) to crash
//const npy_intp dim_ker[2]={%(self_kshp0)s,%(self_kshp1)s};
// The next line had gcc don't crash.
const npy_intp dim_ker0=%(self_kshp0)s;
const npy_intp dim_ker1=%(self_kshp1)s;
%(dim_zz_const)s npy_intp dim_zz[2]={%(self_outshp0)s,%(self_outshp1)s};

%(dim_zz_affect)s
PyArray_Dims img2d_shape;
npy_intp img2d_dim[4]={1,1,0,0};
img2d_shape.ptr=img2d_dim;
img2d_shape.len=4;

PyArray_Dims kerns_shape;
npy_intp kerns_dim[4]={1,1,0,0};
kerns_shape.ptr=kerns_dim;
kerns_shape.len=4;
PyObject *img2d=NULL, *contig, *filtersflipped=NULL;

if(PyArray_NDIM(%(img2d)s)==2){
  img2d_dim[3]=PyArray_DIMS(%(img2d)s)[1];
  img2d_dim[2]=PyArray_DIMS(%(img2d)s)[0];
}else if(PyArray_NDIM(%(img2d)s)==3){
  img2d_dim[3]=PyArray_DIMS(%(img2d)s)[2];
  img2d_dim[2]=PyArray_DIMS(%(img2d)s)[1];
  img2d_dim[0]=PyArray_DIMS(%(img2d)s)[0];
}else if(PyArray_NDIM(%(img2d)s)==4){
  img2d_dim[3]=PyArray_DIMS(%(img2d)s)[3];
  img2d_dim[2]=PyArray_DIMS(%(img2d)s)[2];
  img2d_dim[1]=PyArray_DIMS(%(img2d)s)[1];
  img2d_dim[0]=PyArray_DIMS(%(img2d)s)[0];
}else {
    PyErr_Format(PyExc_ValueError,
      "image don't have a good number of dimensions %%d. ", PyArray_NDIM(%(filtersflipped)s));
    %(fail)s;
}

if(PyArray_NDIM(%(filtersflipped)s)==3){
  kerns_dim[3]=PyArray_DIMS(%(filtersflipped)s)[2];
  kerns_dim[2]=PyArray_DIMS(%(filtersflipped)s)[1];
  kerns_dim[0]=PyArray_DIMS(%(filtersflipped)s)[0];
}else if(PyArray_NDIM(%(filtersflipped)s)==4){
  kerns_dim[3]=PyArray_DIMS(%(filtersflipped)s)[3];
  kerns_dim[2]=PyArray_DIMS(%(filtersflipped)s)[2];
  kerns_dim[1]=PyArray_DIMS(%(filtersflipped)s)[1];
  kerns_dim[0]=PyArray_DIMS(%(filtersflipped)s)[0];
}else{
    PyErr_Format(PyExc_ValueError,
      "kernel don't have a good number of dimensions %%d. ", PyArray_NDIM(%(filtersflipped)s));
    %(fail)s;
}

%(assert_size)s

img2d = PyArray_Newshape(%(img2d)s,&img2d_shape, NPY_CORDER);
img2d_arr = (PyArrayObject*)img2d;
if ((PyArray_STRIDES(img2d_arr)[3] != sizeof(%(type)s))
     || (PyArray_STRIDES(img2d_arr)[2] != PyArray_DIMS(img2d_arr)[3]*sizeof(%(type)s))){
    contig = (PyObject*)(PyArray_GETCONTIGUOUS((PyArrayObject*)img2d));
    Py_DECREF(img2d);
    img2d = contig;
    img2d_arr = (PyArrayObject*)img2d;
    if (!PyArray_ISCONTIGUOUS(img2d_arr)){
        PyErr_SetString(PyExc_ValueError, "img2d isn't contiguous");
        %(fail)s;
    }
}

filtersflipped = PyArray_Newshape(%(filtersflipped)s,&kerns_shape, NPY_CORDER);
filtersflipped_arr = (PyArrayObject*)filtersflipped;
if ((PyArray_STRIDES(filtersflipped_arr)[3] != sizeof(%(type)s))
     || (PyArray_STRIDES(filtersflipped_arr)[2] != PyArray_DIMS(filtersflipped_arr)[3]*sizeof(%(type)s))){
    contig = (PyObject*)(PyArray_GETCONTIGUOUS((PyArrayObject*)filtersflipped));
    Py_DECREF(filtersflipped);
    filtersflipped = contig;
    filtersflipped_arr = (PyArrayObject*)filtersflipped;
    if (!PyArray_ISCONTIGUOUS(filtersflipped_arr)){
        PyErr_SetString(PyExc_ValueError, "filtersflipped isn't contiguous");
        %(fail)s;
    }
}

if(mode != VALID && mode != FULL){
  PyErr_SetString(PyExc_ValueError, "invalid mode, only full and valid are supported"); %(fail)s;
}

if(dim_zz[0]<=0 || dim_zz[1]<=0){
PyErr_Format(PyExc_ValueError,
      "Output dimensions are not valid %%ldx%%ld",(long int)dim_zz[0],(long int)dim_zz[1]);
      %(fail)s;
}

typenum = PyArray_ObjectType((PyObject*)%(img2d)s, 0);
typenum_f = PyArray_ObjectType((PyObject*)%(filtersflipped)s, 0);
if (typenum < 0) {PyErr_SetString(PyExc_ValueError, "Invalid type"); %(fail)s;}
if (typenum != typenum_f) {PyErr_SetString(PyExc_ValueError, "Input types must match"); %(fail)s;}

if (!img2d) %(fail)s;
if (!filtersflipped) %(fail)s;
if ((!%(z)s)
  || *PyArray_DIMS(%(z)s)!=4
  ||(PyArray_DIMS(%(z)s)[0] != %(self_bsize)s)
  ||(PyArray_DIMS(%(z)s)[1] != %(self_nkern)s)
  ||(PyArray_DIMS(%(z)s)[2] != dim_zz[0])
  || (PyArray_DIMS(%(z)s)[3] != dim_zz[1])
  )
{
  if (%(z)s) Py_DECREF(%(z)s);
  npy_intp dims[4] = {0,0,0,0};
  if(!dims) %(fail)s;
  dims[0]=%(self_bsize)s;
  dims[1]=%(self_nkern)s;
  dims[2]=dim_zz[0];
  dims[3]=dim_zz[1];
  %(z)s = (PyArrayObject*) PyArray_ZEROS(4, dims, typenum,0);
}else{
  //PyArray_FILLWBYTE((PyObject*)%(z)s,0);
}
z_arr = (PyArrayObject*) %(z)s;

// assert the output is C-contiguous
if (!PyArray_ISCONTIGUOUS(%(z)s))
{
    PyErr_SetString(PyExc_AssertionError, "Output (%(z)s) not contiguous");
    %(fail)s;
}

//The if on the number of loop make a speed up for small array.
//with g++ 4.5.1. The compiler should be smart enough to do this himself!
#pragma omp parallel for schedule(static) if(%(self_bsize)s * %(self_nkern)s > 1)
// We merge the 2 loop into one to make it easier to parallelize on both
// This is the equivalent of those 2 lines.
//for(int b=0;b< %(self_bsize)s;b++){
// for(int n_kern=0;n_kern<%(self_nkern)s;n_kern++){
for(int batch_kern_idx=0;
    batch_kern_idx < %(self_bsize)s * %(self_nkern)s;
    batch_kern_idx++){
    int b = batch_kern_idx / %(self_nkern)s;
    int n_kern = batch_kern_idx %% %(self_nkern)s;

    %(type)s * __restrict__ out=(%(type)s *)(PyArray_GETPTR2(z_arr,b,n_kern));
    for (int i = 0; i < dim_zz[0]*dim_zz[1]; ++i) out[i] = 0;

    for(int stack_size=0;stack_size<%(self_imshp0)s;stack_size++){

      const %(type)s * __restrict__ in=(%(type)s *)(PyArray_GETPTR2(img2d_arr,b,stack_size));
      const %(type)s * __restrict__ hvals=(%(type)s *)(PyArray_GETPTR2(filtersflipped_arr,n_kern,stack_size));

      int new_m;

      for (int iter_m=0; iter_m < dim_zz[0]; iter_m++) {
        // Reposition index into input image based on requested output size
        int pos_m = iter_m*%(self_dx)s;//The position of the patch in the image
        if (mode == FULL) new_m = pos_m ;
        else new_m = (pos_m+dim_ker0-1);

        for (int iter_n=0; iter_n < dim_zz[1]; iter_n++) {  // loop over columns
          int pos_n=iter_n*%(self_dy)s;
          %(type)s sum=0;
          %(type)s sum2=0;
          %(type)s sum3=0;
          %(type)s sum4=0;
          int nb_sum=0;
          // Sum over kernel, if index into image is out of bounds
          // fill with the value
          for (int j=0; j < dim_ker0; j++) {
            int ind0 = (new_m-j);

            if(mode==FULL){
              const %(type)s * idx_hvals=&hvals[j*dim_ker1];
              if(ind0 < 0 || ind0 >= dim_im[0]){
                if(fill_value!=0)
                  for (int k=0; k < dim_ker1; k++) {
                    sum+= idx_hvals[k] * fill_value;
                  }
              }else{
                //do the part where kernel is to the right of the img
                int k=0,max_k=max((int)(pos_n-dim_im[1])+1,0);
                if(fill_value!=0){

                  for(k=0;k<max_k;k++){
                    sum+= idx_hvals[k]*fill_value;
                  }
                }else {k=max_k;}

                //do the part where the kernel is on the img
                max_k=min(pos_n+1,(int)dim_ker1);
                const %(type)s * idx_in=&in[ind0*dim_im[1]];

                if(iter_n + 4*%(self_dy)s < dim_zz[1]
                         && iter_n>dim_ker1-1
                         && iter_n<dim_im[1]-dim_ker1+1-3){
                  nb_sum=4;
                  for (int ind1=pos_n-k; k<max_k; k++,ind1--) {
                    sum+=idx_hvals[k]*idx_in[ind1];
                    sum2+=idx_hvals[k]*idx_in[ind1+%(self_dy)s];
                    sum3+=idx_hvals[k]*idx_in[ind1+2*%(self_dy)s];
                    sum4+=idx_hvals[k]*idx_in[ind1+3*%(self_dy)s];
                  }
                }else if(iter_n + 2*%(self_dy)s < dim_zz[1]
                         && iter_n>dim_ker1-1
                         && iter_n<dim_im[1]-dim_ker1+1){
                  nb_sum=2;
                  for (int ind1=pos_n-k; k<max_k; k++,ind1--) {
                    sum+=idx_hvals[k]*idx_in[ind1];
                    sum2+=idx_hvals[k]*idx_in[ind1+%(self_dy)s];
                  }
                }else{
                  nb_sum=1;
                  /*
                  %(type)s sum_=0;
                  if((k-max_k) & 0x1 != 0){
                    sum+= idx_hvals[k] * idx_in[pos_n-k];
                  }
                  for (int ind1=pos_n-k; k<max_k; k+=2,ind1-=2) {
                    sum+= idx_hvals[k] * idx_in[ind1];
                    sum_+= idx_hvals[k+1] * idx_in[ind1-1];
                  }
                  sum+=sum_;
                  */
                  for (int ind1=pos_n-k; k<max_k; k++,ind1--) {
                    sum+=idx_hvals[k]*idx_in[ind1];
                  }
                }
                //do the part to the left of the img
                if(fill_value!=0)
                  for(;k<dim_ker1;k++) sum+= idx_hvals[k]*fill_value;
              }
            }else{//valid mode
              const %(type)s* idx_in=&in[ind0*dim_im[1]];
              const %(type)s* idx_hvals=&hvals[j*dim_ker1];
              if(iter_n + 4*%(self_dy)s < dim_zz[1]){
                nb_sum=4;
                for (int k=dim_ker1-1,im_idx=pos_n; k >=0; k--,im_idx++) {
                  sum+=idx_hvals[k]*idx_in[im_idx];
                  sum2+=idx_hvals[k]*idx_in[im_idx+%(self_dy)s];
                  sum3+=idx_hvals[k]*idx_in[im_idx+2*%(self_dy)s];
                  sum4+=idx_hvals[k]*idx_in[im_idx+3*%(self_dy)s];
                }
              }else if(iter_n + 2*%(self_dy)s < dim_zz[1]){
                nb_sum=2;
                for (int k=dim_ker1-1,im_idx=pos_n; k >=0; k--,im_idx++) {
                  sum+=idx_hvals[k]*idx_in[im_idx];
                  sum2+=idx_hvals[k]*idx_in[im_idx+%(self_dy)s];
                }
              }else{
                nb_sum=1;
                for (int k=dim_ker1-1,im_idx=pos_n; k >=0; k--,im_idx++) {
                  sum+=idx_hvals[k]*idx_in[im_idx];
                }
              }
            }//else valid mode
          }//for j
          switch(nb_sum){
          case 4: out[iter_m*dim_zz[1]+iter_n+3] %(affectation)s sum4;
          case 3: out[iter_m*dim_zz[1]+iter_n+2] %(affectation)s sum3;
          case 2: out[iter_m*dim_zz[1]+iter_n+1] %(affectation)s sum2;
          case 1: out[iter_m*dim_zz[1]+iter_n] %(affectation)s sum;
          }
          iter_n+=nb_sum-1;
        }//for iter_n
      }//for iter_m
    }//for stack_size
}//for b and n_kern

Py_XDECREF(img2d);
Py_XDECREF(filtersflipped);
"""
