import warnings

import theano

from .. import init
from .. import nonlinearities

from .base import Layer

from .conv import conv_output_length, BaseConvLayer
from ..utils import as_tuple

# check if Theano's new GPU backend is available and in use
try:
    from theano import gpuarray as gpu
except ImportError:
    # older Theano versions ship the new backend under theano.sandbox
    from theano.sandbox import gpuarray as gpu
gpu_enabled = gpu.pygpu_activated
# if not, try to fall back to Theano's old GPU backend
if not gpu_enabled:
    try:
        from theano.sandbox import cuda as gpu
    except Exception:  # Theano 0.10+ raises nose.SkipTest
        gpu_enabled = False
    else:
        gpu_enabled = gpu.cuda_enabled
# if either of the backends is available, use it, otherwise bail out
if gpu_enabled:
    # both backends expose the needed ops under the same attribute paths,
    # so the rest of the module can use `gpu` without caring which one it is
    gpu_contiguous = gpu.basic_ops.gpu_contiguous
    GpuCorrMM = gpu.blas.GpuCorrMM
else:
    raise ImportError(
        "requires GPU support -- see http://lasagne.readthedocs.org/en/"
        "latest/user/installation.html#gpu-support")  # pragma: no cover

# warn once at import time if Theano is configured for double precision;
# see the warning text for the recommended configuration
if theano.config.floatX == 'float64':
    warnings.warn("You are using a GPU layer with Theano configured for "
                  "double precision (floatX=float64). Depending on your "
                  "Theano version and GPU, this may be slow or unsupported. "
                  "We recommend to configure Theano for single precision "
                  "(floatX=float32); see http://lasagne.readthedocs.org/en/"
                  "latest/user/installation.html#gpu-support.")

__all__ = [
    "Conv2DMMLayer",
]


class Conv2DMMLayer(BaseConvLayer):
    """
    lasagne.layers.Conv2DMMLayer(incoming, num_filters, filter_size,
    stride=(1, 1), pad=0, untie_biases=False,
    W=lasagne.init.GlorotUniform(), b=lasagne.init.Constant(0.),
    nonlinearity=lasagne.nonlinearities.rectify, flip_filters=False,
    **kwargs)

    2D convolutional layer

    Performs a 2D convolution on its input and optionally adds a bias and
    applies an elementwise nonlinearity.  This is an alternative implementation
    which uses ``theano.sandbox.cuda.blas.GpuCorrMM`` directly.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape. The
        output of this layer should be a 4D tensor, with shape
        ``(batch_size, num_input_channels, input_rows, input_columns)``.

    num_filters : int
        The number of learnable convolutional filters this layer has.

    filter_size : int or iterable of int
        An integer or a 2-element tuple specifying the size of the filters.

    stride : int or iterable of int
        An integer or a 2-element tuple specifying the stride of the
        convolution operation.

    pad : int, iterable of int, 'full', 'same' or 'valid' (default: 0)
        By default, the convolution is only computed where the input and the
        filter fully overlap (a valid convolution). When ``stride=1``, this
        yields an output that is smaller than the input by ``filter_size - 1``.
        The `pad` argument allows you to implicitly pad the input with zeros,
        extending the output size.

        A single integer results in symmetric zero-padding of the given size on
        all borders, a tuple of two integers allows different symmetric padding
        per dimension.

        ``'full'`` pads with one less than the filter size on both sides. This
        is equivalent to computing the convolution wherever the input and the
        filter overlap by at least one position.

        ``'same'`` pads with half the filter size (rounded down) on both sides.
        When ``stride=1`` this results in an output size equal to the input
        size. Even filter size is not supported.

        ``'valid'`` is an alias for ``0`` (no padding / a valid convolution).

        Note that ``'full'`` and ``'same'`` can be faster than equivalent
        integer values due to optimizations by Theano.

    untie_biases : bool (default: False)
        If ``False``, the layer will have a bias parameter for each channel,
        which is shared across all positions in this channel. As a result, the
        `b` attribute will be a vector (1D).

        If True, the layer will have separate bias parameters for each
        position in each channel. As a result, the `b` attribute will be a
        3D tensor.

    W : Theano shared variable, expression, numpy array or callable
        Initial value, expression or initializer for the weights.
        These should be a 4D tensor with shape
        ``(num_filters, num_input_channels, filter_rows, filter_columns)``.
        See :func:`lasagne.utils.create_param` for more information.

    b : Theano shared variable, expression, numpy array, callable or ``None``
        Initial value, expression or initializer for the biases. If set to
        ``None``, the layer will have no biases. Otherwise, biases should be
        a 1D array with shape ``(num_filters,)`` if `untie_biases` is set to
        ``False``. If it is set to ``True``, its shape should be
        ``(num_filters, output_rows, output_columns)`` instead.
        See :func:`lasagne.utils.create_param` for more information.

    nonlinearity : callable or None
        The nonlinearity that is applied to the layer activations. If None
        is provided, the layer will be linear.

    flip_filters : bool (default: False)
        Whether to flip the filters and perform a convolution, or not to flip
        them and perform a correlation. Flipping adds a bit of overhead, so it
        is disabled by default. In most cases this does not make a difference
        anyway because the filters are learnt. However, ``flip_filters`` should
        be set to ``True`` if weights are loaded into it that were learnt using
        a regular :class:`lasagne.layers.Conv2DLayer`, for example.

    num_groups : int (default: 1)
        The number of groups to split the input channels and output channels
        into, such that data does not cross the group boundaries. Requires the
        number of channels to be divisible by the number of groups, and
        requires Theano 0.10 or later for more than one group.

    **kwargs
        Any additional keyword arguments are passed to the `Layer` superclass.

    Attributes
    ----------
    W : Theano shared variable
        Variable representing the filter weights.

    b : Theano shared variable
        Variable representing the biases.
    """
    def __init__(self, incoming, num_filters, filter_size, stride=(1, 1),
                 pad=0, untie_biases=False, W=init.GlorotUniform(),
                 b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
                 flip_filters=False, num_groups=1, **kwargs):
        # n=2 tells BaseConvLayer this is a 2D convolution; it normalizes
        # filter_size/stride/pad and creates the W and b parameters
        super(Conv2DMMLayer, self).__init__(incoming, num_filters, filter_size,
                                            stride, pad, untie_biases, W, b,
                                            nonlinearity, flip_filters,
                                            num_groups, n=2, **kwargs)
        # GpuCorrMM calls Lasagne's 'same' padding mode 'half'; all other
        # pad values (ints, tuples, 'full', 'valid') are accepted as-is
        border_mode = 'half' if self.pad == 'same' else self.pad
        # only pass num_groups when actually grouping, so that older Theano
        # versions (which lack the num_groups argument) keep working for the
        # default num_groups=1 case
        extra_kwargs = {'num_groups': num_groups} if num_groups > 1 else {}
        # pre-build the correlation op; convolve() applies it per call
        self.corr_mm_op = GpuCorrMM(subsample=self.stride,
                                    border_mode=border_mode,
                                    **extra_kwargs)

    def convolve(self, input, **kwargs):
        """Apply the GpuCorrMM correlation op to `input` using `self.W`.

        GpuCorrMM computes a correlation, so a true convolution is obtained
        by flipping the filters first (only done when `flip_filters` is set).
        Returns the unbiased, un-activated convolution output; bias and
        nonlinearity are handled by the base class.
        """
        filters = self.W
        if self.flip_filters:
            filters = filters[:, :, ::-1, ::-1]  # flip top-down, left-right

        # GpuCorrMM requires C-contiguous inputs; gpu_contiguous inserts a
        # copy only if needed
        contiguous_filters = gpu_contiguous(filters)
        contiguous_input = gpu_contiguous(input)
        conved = self.corr_mm_op(contiguous_input, contiguous_filters)
        return conved
