/***************************************************************************************************
 * Copyright (c) 2017-2020, NVIDIA CORPORATION.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright notice,
 *       this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the NVIDIA CORPORATION nor the names of its
 *       contributors may be used to endorse or promote products derived from
 *       this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/
/*! \file
    \brief Templates implementing the address computation for storing tiles
        of pitch-linear rank=2 tensors.
*/

#pragma once

#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
#include "cutlass/layout/tensor_op_multiplicand_sm80.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/transform/threadblock/regular_tile_access_iterator.h"

////////////////////////////////////////////////////////////////////////////////

namespace cutlass {
namespace transform {
namespace threadblock {

////////////////////////////////////////////////////////////////////////////////

/// Tile iterator specialized for congruous arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
///            ReadableContiguousTileIteratorConcept |
///            WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
          typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<Shape_, Element_,
                                layout::TensorOpMultiplicandCongruous64b,
                                AdvanceRank, ThreadMap_, Alignment> {
public:
    static_assert(AdvanceRank == 0 || AdvanceRank == 1,
                  "Specialization for pitch-linear iterator may advance along "
                  "the contiguous(rank=0) or strided(rank=1) dimension.");

    using Shape = Shape_;
    using Element = Element_;
    using Layout = layout::TensorOpMultiplicandCongruous64b;
    static int const kAdvanceRank = AdvanceRank;
    static int const kAlignment = Alignment;

    using Index = typename Layout::Index;
    using LongIndex = typename Layout::LongIndex;

    using TensorRef = TensorRef<Element, Layout>;
    using TensorCoord = typename Layout::TensorCoord;

    using ThreadMap = ThreadMap_;

    static_assert(ThreadMap::kThreads / 32 > 1,
                  "This tile iterator requires at least two warps.");

    /// Internal details made public to facilitate introspection
    struct Detail {
        /// This iterator is specialized for an access size that is 64 bits in
        /// length.
        static int const kAccessSizeInBits = 64;

        static_assert(
                sizeof_bits<Element_>::value * ThreadMap::kElementsPerAccess ==
                        kAccessSizeInBits,
                "This iterator requires a policy whose access size is 64b");

        ///< Number of pointers
        static int const kPointerCount = 1;
    };

    /// Element type per access
    using AccessType = Array<Element, Layout::kElementsPerAccess>;

private:
    //
    // Data members
    //

    /// Stride value
    Index stride_;

    /// Internal pointer to first access of tile
    AccessType* pointer_;

    /// Internal byte offset
    Index byte_offset_;

    /// Iteration in the contiguous dimension
    int iteration_contiguous_;

    /// Iteration in the strided dimension
    int iteration_strided_;

public:
    /// Construct a TileIterator with zero threadblock offset
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator(
            TensorRef ref,  ///< Pointer to start of tensor
            int thread_id   ///< ID of each participating thread
            )
            : stride_(ref.stride(0) / Layout::kElementsPerAccess),
              byte_offset_(0) {
        layout::PitchLinearCoord thread_offset_base =
                ThreadMap::initial_offset(thread_id);

        // This is the offset of a thread within a threadblock tile for a
        // specific pointer (units of elements)
        layout::PitchLinearCoord thread_offset_in_threadblock_tile =
                thread_offset_base;

        // initialize pointer
        pointer_ = reinterpret_cast<AccessType*>(
                ref.data() + ref.offset(thread_offset_in_threadblock_tile));

        set_iteration_index(0);
    }

    /// Overrides the internal iteration index
    CUTLASS_HOST_DEVICE
    void set_iteration_index(int index) {
        iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
        iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
    }

    /// Adds a pointer offset in units of Element
    CUTLASS_HOST_DEVICE
    void add_pointer_offset(LongIndex pointer_offset) {
        byte_offset_ += pointer_offset * sizeof(Element);
    }

    /// Returns a pointer
    CUTLASS_HOST_DEVICE
    AccessType* get() const {
        AccessType* access_ptr = pointer_;

        int access_offset =
                iteration_strided_ * ThreadMap::Delta::kStrided * stride_ +
                iteration_contiguous_ * ThreadMap::Delta::kContiguous /
                        ThreadMap::kElementsPerAccess;

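        // access_offset counts AccessType elements; converting to a byte
        // pointer below lets the byte-granular byte_offset_ be applied.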
        char* access_byte_ptr =
                reinterpret_cast<char*>(access_ptr + access_offset);

        return reinterpret_cast<AccessType*>(access_byte_ptr + byte_offset_);
    }

    /// Advances to the next tile in memory.
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator& operator++() {
        ++iteration_contiguous_;

        if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
            return *this;

        // Enter here only if (iteration_contiguous_ ==
        // ThreadMap::Iterations::kContiguous)
        iteration_contiguous_ = 0;
        ++iteration_strided_;

        if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
            return *this;
        }

        // Enter here only if (iteration_strided_ ==
        // ThreadMap::Iterations::kStrided), which means we enter the next
        // tile.
        iteration_strided_ = 0;

        return *this;
    }

    /// Advances to the next tile in memory.
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator operator++(int) {
        RegularTileAccessIterator prev(*this);

        this->operator++();

        return prev;
    }

    /// Adds a tile offset
    CUTLASS_DEVICE
    void add_tile_offset(TensorCoord const& coord) {
        add_pointer_offset(coord.contiguous() * Shape::kContiguous +
                           coord.strided() * Shape::kStrided * stride_ *
                                   Layout::kElementsPerAccess);
    }
};
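
// A minimal usage sketch (illustrative, not part of the library): construct
// the iterator over a shared-memory tile and walk its per-thread accesses.
// The shape, thread map, and element type below are assumptions chosen only
// to satisfy this specialization's static_asserts, not required values.
//
//   using Shape = layout::PitchLinearShape<64, 16>;
//   using ThreadMap = PitchLinearWarpRakedThreadMap<
//       Shape, 64, layout::PitchLinearShape<16, 2>, 1>;
//   using Iterator = RegularTileAccessIterator<
//       Shape, double, layout::TensorOpMultiplicandCongruous64b, 1,
//       ThreadMap, 8>;
//
//   // In device code, with 'ref' a TensorRef into the shared-memory tile:
//   Iterator iter(ref, threadIdx.x);
//   CUTLASS_PRAGMA_UNROLL
//   for (int i = 0; i < ThreadMap::Iterations::kCount; ++i, ++iter) {
//     Iterator::AccessType* access = iter.get();  // one 64b access per step
//   }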

////////////////////////////////////////////////////////////////////////////////

/// Tile Iterator specialized for column-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
///            ReadableContiguousTileIteratorConcept |
///            WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
          typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
        Shape_, Element_, layout::ColumnMajorTensorOpMultiplicandCongruous64b,
        AdvanceRank, ThreadMap_, Alignment> {
public:
    static_assert(AdvanceRank == 0 || AdvanceRank == 1,
                  "Specialization for column-major iterator may advance along "
                  "the columns(rank=0) or rows(rank=1) dimension.");

    using Shape = Shape_;
    using Element = Element_;
    using Layout = layout::ColumnMajorTensorOpMultiplicandCongruous64b;
    static int const kAdvanceRank = AdvanceRank;
    static int const kAlignment = Alignment;

    using Index = typename Layout::Index;
    using LongIndex = typename Layout::LongIndex;

    using TensorRef = TensorRef<Element, Layout>;
    using TensorCoord = typename Layout::TensorCoord;

    using ThreadMap = ThreadMap_;

    /// Underlying iterator type
    using UnderlyingIterator = RegularTileAccessIterator<
            layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
            layout::TensorOpMultiplicandCongruous64b,
            (kAdvanceRank == 0 ? 0 : 1), ThreadMap_>;

    using AccessType = typename UnderlyingIterator::AccessType;

private:
    /// Underlying iterator
    UnderlyingIterator iterator_;

public:
    /// Construct a TileIterator with zero threadblock offset
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator(
            TensorRef ref,  ///< Pointer to start of tensor
            int thread_id   ///< ID of each participating thread
            )
            : iterator_({ref.data(), ref.stride()}, thread_id) {}

    /// Overrides the internal iteration index
    CUTLASS_HOST_DEVICE
    void set_iteration_index(int index) {
        iterator_.set_iteration_index(index);
    }

    /// Adds a pointer offset in units of Element
    CUTLASS_HOST_DEVICE
    void add_pointer_offset(LongIndex pointer_offset) {
        iterator_.add_pointer_offset(pointer_offset);
    }

    /// Returns a pointer
    CUTLASS_HOST_DEVICE
    AccessType* get() const {
        return reinterpret_cast<AccessType*>(iterator_.get());
    }

    /// Adds a tile offset
    CUTLASS_DEVICE
    void add_tile_offset(TensorCoord const& coord) {
        iterator_.add_tile_offset({coord.row(), coord.column()});
    }

    /// Advances to the next tile in memory.
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator& operator++() {
        ++iterator_;
        return *this;
    }

    /// Advances to the next tile in memory.
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator operator++(int) {
        RegularTileAccessIterator prev(*this);
        ++iterator_;

        return prev;
    }
};

////////////////////////////////////////////////////////////////////////////////

/// Tile Iterator specialized for row-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
///            ReadableContiguousTileIteratorConcept |
///            WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
          typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
        Shape_, Element_, layout::RowMajorTensorOpMultiplicandCongruous64b,
        AdvanceRank, ThreadMap_, Alignment> {
public:
    static_assert(
            AdvanceRank == 0 || AdvanceRank == 1,
            "Specialization for row-major iterator may advance along the "
            "columns(rank=0) or rows(rank=1) dimension.");

    using Shape = Shape_;
    using Element = Element_;
    using Layout = layout::RowMajorTensorOpMultiplicandCongruous64b;
    static int const kAdvanceRank = AdvanceRank;
    static int const kAlignment = Alignment;

    using Index = typename Layout::Index;
    using LongIndex = typename Layout::LongIndex;

    using TensorRef = TensorRef<Element, Layout>;
    using TensorCoord = typename Layout::TensorCoord;

    using ThreadMap = ThreadMap_;

    /// Underlying iterator type
    using UnderlyingIterator = RegularTileAccessIterator<
            layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
            layout::TensorOpMultiplicandCongruous64b,
            (kAdvanceRank == 0 ? 1 : 0), ThreadMap_>;

    using AccessType = typename UnderlyingIterator::AccessType;

private:
    /// Underlying iterator
    UnderlyingIterator iterator_;

public:
    /// Construct a TileIterator with zero threadblock offset
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator(
            TensorRef ref,  ///< Pointer to start of tensor
            int thread_id   ///< ID of each participating thread
            )
            : iterator_({ref.data(), ref.stride()}, thread_id) {}

    /// Overrides the internal iteration index
    CUTLASS_HOST_DEVICE
    void set_iteration_index(int index) {
        iterator_.set_iteration_index(index);
    }

    /// Adds a pointer offset in units of Element
    CUTLASS_HOST_DEVICE
    void add_pointer_offset(LongIndex pointer_offset) {
        iterator_.add_pointer_offset(pointer_offset);
    }

    /// Returns a pointer
    CUTLASS_HOST_DEVICE
    AccessType* get() const {
        return reinterpret_cast<AccessType*>(iterator_.get());
    }

    /// Adds a tile offset
    CUTLASS_DEVICE
    void add_tile_offset(TensorCoord const& coord) {
        iterator_.add_tile_offset({coord.column(), coord.row()});
    }

    /// Advances to the next tile in memory.
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator& operator++() {
        ++iterator_;
        return *this;
    }

    /// Advances to the next tile in memory.
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator operator++(int) {
        RegularTileAccessIterator prev(*this);
        ++iterator_;

        return prev;
    }
};

////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////

/// Tile iterator specialized for crosswise arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
///            ReadableContiguousTileIteratorConcept |
///            WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
          typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<Shape_, Element_,
                                layout::TensorOpMultiplicand64bCrosswise,
                                AdvanceRank, ThreadMap_, Alignment> {
public:
    static_assert(AdvanceRank == 0 || AdvanceRank == 1,
                  "Specialization for pitch-linear iterator may advance along "
                  "the contiguous(rank=0) or strided(rank=1) dimension.");

    using Shape = Shape_;
    using Element = Element_;
    using Layout = layout::TensorOpMultiplicand64bCrosswise;
    static int const kAdvanceRank = AdvanceRank;
    static int const kAlignment = Alignment;

    using Index = typename Layout::Index;
    using LongIndex = typename Layout::LongIndex;

    using TensorRef = TensorRef<Element, Layout>;
    using TensorCoord = typename Layout::TensorCoord;

    using ThreadMap = ThreadMap_;

    static_assert(ThreadMap::kThreads / 32 > 1,
                  "This tile iterator requires at least two warps.");

    /// Internal details made public to facilitate introspection
    struct Detail {
        /// This iterator is specialized for an access size that is 64 bits in
        /// length.
        static int const kAccessSizeInBits = 64;

        static_assert(
                sizeof_bits<Element_>::value * ThreadMap::kElementsPerAccess ==
                        kAccessSizeInBits,
                "This iterator requires a policy whose access size is 64b");

        ///< Number of pointers - two pointers are needed if making more than
        ///< four iterations along the strided dimension
        static int const kPointerCount =
                (ThreadMap::Iterations::kStrided > 4 ? 2 : 1);
    };

    /// Element type per access
    using AccessType = Array<Element, Layout::kElementsPerAccess>;

private:
    //
    // Data members
    //

    /// Stride value
    Index stride_;

    /// Internal pointer to first access of tile
    AccessType* pointer_;

    /// Internal byte offset
    Index byte_offset_[Detail::kPointerCount];

    /// Iteration in the contiguous dimension
    int iteration_contiguous_;

    /// Iteration in the strided dimension
    int iteration_strided_;

public:
    /// Construct a TileIterator with zero threadblock offset
    CUTLASS_DEVICE
    RegularTileAccessIterator(
            TensorRef ref,  ///< Pointer to start of tensor
            int thread_id   ///< ID of each participating thread
            )
            : stride_(ref.stride(0) / ThreadMap::kElementsPerAccess) {
        layout::PitchLinearCoord thread_offset_base =
                ThreadMap::initial_offset(thread_id);

        // This is the offset of a thread within a threadblock tile for a
        // specific pointer (units of elements)
        layout::PitchLinearCoord thread_offset_in_threadblock_tile =
                thread_offset_base;

        // initialize pointer
        pointer_ = reinterpret_cast<AccessType*>(ref.data());

        byte_offset_[0] =
                ref.offset(thread_offset_in_threadblock_tile) * sizeof(Element);

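        // When two pointers are needed, the second byte offset toggles bit 3
        // (an XOR with 8 bytes), displacing it by one 64b access relative to
        // the first.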
        if (Detail::kPointerCount == 2) {
            byte_offset_[1] = byte_offset_[0] ^ 8;
        }

        set_iteration_index(0);
    }

    /// Overrides the internal iteration index
    CUTLASS_HOST_DEVICE
    void set_iteration_index(int index) {
        iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
        iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
    }

    /// Adds a pointer offset in units of Element
    CUTLASS_HOST_DEVICE
    void add_pointer_offset(LongIndex pointer_offset) {
        pointer_ += pointer_offset / ThreadMap::kElementsPerAccess;
    }

    /// Returns a pointer
    CUTLASS_DEVICE
    AccessType* get() const {
        // Map the logical contiguous and strided access to the internal
        // swizzled structure.
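        // The low two bits of the strided iteration index select one of four
        // rows (stride_ accesses apart); bits 3 and above jump ahead by 16
        // accesses per step. Bit 2 is consumed below when choosing between
        // the two precomputed byte offsets.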
        int uniform_offset = (iteration_strided_ & 0x3) * stride_ +
                             (iteration_strided_ >> 3) * 16;

        char* access_byte_ptr =
                reinterpret_cast<char*>(pointer_ + uniform_offset);

        int byte_offset;

        // This iterator may require a second byte offset if it makes more
        // than four iterations along the strided dimension
        if (Detail::kPointerCount == 2 && (iteration_strided_ & 0x4)) {
            byte_offset = byte_offset_[1];
        } else {
            byte_offset = byte_offset_[0];
        }

        return reinterpret_cast<AccessType*>(access_byte_ptr + byte_offset);
    }

    /// Advances to the next tile in memory.
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator& operator++() {
        ++iteration_contiguous_;

        if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
            return *this;

        // Enter here only if (iteration_contiguous_ ==
        // ThreadMap::Iterations::kContiguous)
        iteration_contiguous_ = 0;
        ++iteration_strided_;

        if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
            return *this;
        }

        // Enter here only if (iteration_strided_ ==
        // ThreadMap::Iterations::kStrided), which means we enter the next
        // tile.
        iteration_strided_ = 0;

        return *this;
    }

    /// Advances to the next tile in memory.
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator operator++(int) {
        RegularTileAccessIterator prev(*this);

        this->operator++();

        return prev;
    }

    /// Adds a tile offset
    CUTLASS_DEVICE
    void add_tile_offset(TensorCoord const& coord) {
        add_pointer_offset(coord.strided() * Shape::kStrided +
                           coord.contiguous() * Shape::kContiguous * stride_);
    }
};
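
// Illustrative walk of the strided iterations above (assuming
// Detail::kPointerCount == 2, i.e. more than four strided iterations): the
// uniform offset revisits rows 0..3, while bit 2 of the iteration index
// switches to the second byte offset, which differs from the first by an
// 8-byte XOR:
//
//   iteration_strided_  : 0   1   2   3   4   5   6   7
//   (s & 0x3) row       : 0   1   2   3   0   1   2   3
//   byte offset used    : [0] [0] [0] [0] [1] [1] [1] [1]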

////////////////////////////////////////////////////////////////////////////////

/// Tile Iterator specialized for column-major crosswise TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
///            ReadableContiguousTileIteratorConcept |
///            WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
          typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
        Shape_, Element_, layout::ColumnMajorTensorOpMultiplicand64bCrosswise,
        AdvanceRank, ThreadMap_, Alignment> {
public:
    static_assert(AdvanceRank == 0 || AdvanceRank == 1,
                  "Specialization for column-major iterator may advance along "
                  "the columns(rank=0) or rows(rank=1) dimension.");

    using Shape = Shape_;
    using Element = Element_;
    using Layout = layout::ColumnMajorTensorOpMultiplicand64bCrosswise;
    static int const kAdvanceRank = AdvanceRank;
    static int const kAlignment = Alignment;

    using Index = typename Layout::Index;
    using LongIndex = typename Layout::LongIndex;

    using TensorRef = TensorRef<Element, Layout>;
    using TensorCoord = typename Layout::TensorCoord;

    using ThreadMap = ThreadMap_;

    /// Underlying iterator type
    using UnderlyingIterator = RegularTileAccessIterator<
            layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
            layout::TensorOpMultiplicand64bCrosswise,
            (kAdvanceRank == 0 ? 0 : 1), ThreadMap_>;

    using AccessType = typename UnderlyingIterator::AccessType;

private:
    /// Underlying iterator
    UnderlyingIterator iterator_;

public:
    /// Construct a TileIterator with zero threadblock offset
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator(
            TensorRef ref,  ///< Pointer to start of tensor
            int thread_id   ///< ID of each participating thread
            )
            : iterator_({ref.data(), ref.stride()}, thread_id) {}

    /// Overrides the internal iteration index
    CUTLASS_HOST_DEVICE
    void set_iteration_index(int index) {
        iterator_.set_iteration_index(index);
    }

    /// Adds a pointer offset in units of Element
    CUTLASS_HOST_DEVICE
    void add_pointer_offset(LongIndex pointer_offset) {
        iterator_.add_pointer_offset(pointer_offset);
    }

    /// Returns a pointer
    CUTLASS_HOST_DEVICE
    AccessType* get() const {
        return reinterpret_cast<AccessType*>(iterator_.get());
    }

    /// Adds a tile offset
    CUTLASS_DEVICE
    void add_tile_offset(TensorCoord const& coord) {
        iterator_.add_tile_offset({coord.row(), coord.column()});
    }

    /// Advances to the next tile in memory.
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator& operator++() {
        ++iterator_;
        return *this;
    }

    /// Advances to the next tile in memory.
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator operator++(int) {
        RegularTileAccessIterator prev(*this);
        ++iterator_;

        return prev;
    }
};

////////////////////////////////////////////////////////////////////////////////

/// Tile Iterator specialized for row-major crosswise TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
///            ReadableContiguousTileIteratorConcept |
///            WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
          typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
        Shape_, Element_, layout::RowMajorTensorOpMultiplicand64bCrosswise,
        AdvanceRank, ThreadMap_, Alignment> {
public:
    static_assert(
            AdvanceRank == 0 || AdvanceRank == 1,
            "Specialization for row-major iterator may advance along the "
            "columns(rank=0) or rows(rank=1) dimension.");

    using Shape = Shape_;
    using Element = Element_;
    using Layout = layout::RowMajorTensorOpMultiplicand64bCrosswise;
    static int const kAdvanceRank = AdvanceRank;
    static int const kAlignment = Alignment;

    using Index = typename Layout::Index;
    using LongIndex = typename Layout::LongIndex;

    using TensorRef = TensorRef<Element, Layout>;
    using TensorCoord = typename Layout::TensorCoord;

    using ThreadMap = ThreadMap_;

    /// Underlying iterator type
    using UnderlyingIterator = RegularTileAccessIterator<
            layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
            layout::TensorOpMultiplicand64bCrosswise,
            (kAdvanceRank == 0 ? 1 : 0), ThreadMap_>;

    using AccessType = typename UnderlyingIterator::AccessType;

private:
    /// Underlying iterator
    UnderlyingIterator iterator_;

public:
    /// Construct a TileIterator with zero threadblock offset
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator(
            TensorRef ref,  ///< Pointer to start of tensor
            int thread_id   ///< ID of each participating thread
            )
            : iterator_({ref.data(), ref.stride()}, thread_id) {}

    /// Overrides the internal iteration index
    CUTLASS_HOST_DEVICE
    void set_iteration_index(int index) {
        iterator_.set_iteration_index(index);
    }

    /// Adds a pointer offset in units of Element
    CUTLASS_HOST_DEVICE
    void add_pointer_offset(LongIndex pointer_offset) {
        iterator_.add_pointer_offset(pointer_offset);
    }

    /// Returns a pointer
    CUTLASS_HOST_DEVICE
    AccessType* get() const {
        return reinterpret_cast<AccessType*>(iterator_.get());
    }

    /// Adds a tile offset
    CUTLASS_DEVICE
    void add_tile_offset(TensorCoord const& coord) {
        iterator_.add_tile_offset({coord.column(), coord.row()});
    }

    /// Advances to the next tile in memory.
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator& operator++() {
        ++iterator_;
        return *this;
    }

    /// Advances to the next tile in memory.
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator operator++(int) {
        RegularTileAccessIterator prev(*this);
        ++iterator_;

        return prev;
    }
};

/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////

/// Tile iterator specialized for congruous arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
///            ReadableContiguousTileIteratorConcept |
///            WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
          typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<Shape_, Element_,
                                layout::TensorOpMultiplicandCongruous128b,
                                AdvanceRank, ThreadMap_, Alignment> {
public:
    static_assert(AdvanceRank == 0 || AdvanceRank == 1,
                  "Specialization for pitch-linear iterator may advance along "
                  "the contiguous(rank=0) or strided(rank=1) dimension.");

    using Shape = Shape_;
    using Element = Element_;
    using Layout = layout::TensorOpMultiplicandCongruous128b;
    static int const kAdvanceRank = AdvanceRank;
    static int const kAlignment = Alignment;

    using Index = typename Layout::Index;
    using LongIndex = typename Layout::LongIndex;

    using TensorRef = TensorRef<Element, Layout>;
    using TensorCoord = typename Layout::TensorCoord;

    using ThreadMap = ThreadMap_;

    static_assert(ThreadMap::kThreads / 32 > 1,
                  "This tile iterator requires at least two warps.");

    /// Internal details made public to facilitate introspection
    struct Detail {
        /// This iterator is specialized for an access size that is 128 bits in
        /// length.
        static int const kAccessSizeInBits = 128;

        static_assert(
                sizeof_bits<Element_>::value * ThreadMap::kElementsPerAccess ==
                        kAccessSizeInBits,
                "This iterator requires a policy whose access size is 128b");

        ///< Number of pointers
        static int const kPointerCount = 1;
    };

    /// Element type per access
    using AccessType = Array<Element, Layout::kElementsPerAccess>;

private:
    //
    // Data members
    //

    /// Stride value
    Index stride_;

    /// Internal pointer to first access of tile
    AccessType* pointer_;

    /// Internal byte offset
    Index byte_offset_;

    /// Iteration in the contiguous dimension
    int iteration_contiguous_;

    /// Iteration in the strided dimension
    int iteration_strided_;

public:
    /// Construct a TileIterator with zero threadblock offset
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator(
            TensorRef ref,  ///< Pointer to start of tensor
            int thread_id   ///< ID of each participating thread
            )
            : stride_(ref.stride(0) / Layout::kElementsPerAccess),
              byte_offset_(0) {
        layout::PitchLinearCoord thread_offset_base =
                ThreadMap::initial_offset(thread_id);

        // This is the offset of a thread within a threadblock tile for a
        // specific pointer (units of elements)
        layout::PitchLinearCoord thread_offset_in_threadblock_tile =
                thread_offset_base;

        // initialize pointer
        pointer_ = reinterpret_cast<AccessType*>(
                ref.data() + ref.offset(thread_offset_in_threadblock_tile));

        set_iteration_index(0);
    }

    /// Overrides the internal iteration index
    CUTLASS_HOST_DEVICE
    void set_iteration_index(int index) {
        iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
        iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
    }

    /// Adds a pointer offset in units of Element
    CUTLASS_HOST_DEVICE
    void add_pointer_offset(LongIndex pointer_offset) {
        byte_offset_ += pointer_offset * sizeof(Element);
    }

    /// Returns a pointer
    CUTLASS_HOST_DEVICE
    AccessType* get() const {
        AccessType* access_ptr = pointer_;

        int access_offset =
                iteration_strided_ * ThreadMap::Delta::kStrided * stride_ +
                iteration_contiguous_ * ThreadMap::Delta::kContiguous /
                        ThreadMap::kElementsPerAccess;

        char* access_byte_ptr =
                reinterpret_cast<char*>(access_ptr + access_offset);

        return reinterpret_cast<AccessType*>(access_byte_ptr + byte_offset_);
    }

    /// Advances to the next tile in memory.
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator& operator++() {
        ++iteration_contiguous_;

        if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
            return *this;

        // Enter here only if (iteration_contiguous_ ==
        // ThreadMap::Iterations::kContiguous)
        iteration_contiguous_ = 0;
        ++iteration_strided_;

        if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
            return *this;
        }

        // Enter here only if (iteration_strided_ ==
        // ThreadMap::Iterations::kStrided), which means we enter the next
        // tile.
        iteration_strided_ = 0;

        return *this;
    }

    /// Advances to the next tile in memory.
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator operator++(int) {
        RegularTileAccessIterator prev(*this);

        this->operator++();

        return prev;
    }

    /// Adds a tile offset
    CUTLASS_DEVICE
    void add_tile_offset(TensorCoord const& coord) {
        add_pointer_offset(coord.contiguous() * Shape::kContiguous +
                           coord.strided() * Shape::kStrided * stride_ *
                                   Layout::kElementsPerAccess);
    }
};
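
// A minimal sketch of the element/thread-map pairing this 128b specialization
// expects (an illustrative assumption, not a library requirement): a 64-bit
// element such as complex<float> pairs with two elements per access, since
//
//   sizeof_bits<complex<float>>::value * 2 == 128
//
// whereas a 128-bit element would instead pair with kElementsPerAccess == 1.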

////////////////////////////////////////////////////////////////////////////////

/// Tile Iterator specialized for column-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
///            ReadableContiguousTileIteratorConcept |
///            WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
          typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
        Shape_, Element_, layout::ColumnMajorTensorOpMultiplicandCongruous128b,
        AdvanceRank, ThreadMap_, Alignment> {
public:
    static_assert(AdvanceRank == 0 || AdvanceRank == 1,
                  "Specialization for column-major iterator may advance along "
                  "the columns(rank=0) or rows(rank=1) dimension.");

    using Shape = Shape_;
    using Element = Element_;
    using Layout = layout::ColumnMajorTensorOpMultiplicandCongruous128b;
    static int const kAdvanceRank = AdvanceRank;
    static int const kAlignment = Alignment;

    using Index = typename Layout::Index;
    using LongIndex = typename Layout::LongIndex;

    using TensorRef = TensorRef<Element, Layout>;
    using TensorCoord = typename Layout::TensorCoord;

    using ThreadMap = ThreadMap_;

    /// Underlying iterator type
    using UnderlyingIterator = RegularTileAccessIterator<
            layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
            layout::TensorOpMultiplicandCongruous128b,
            (kAdvanceRank == 0 ? 0 : 1), ThreadMap_>;

    using AccessType = typename UnderlyingIterator::AccessType;

private:
    /// Underlying iterator
    UnderlyingIterator iterator_;

public:
    /// Construct a TileIterator with zero threadblock offset
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator(
            TensorRef ref,  ///< Pointer to start of tensor
            int thread_id   ///< ID of each participating thread
            )
            : iterator_({ref.data(), ref.stride()}, thread_id) {}

    /// Overrides the internal iteration index
    CUTLASS_HOST_DEVICE
    void set_iteration_index(int index) {
        iterator_.set_iteration_index(index);
    }

    /// Adds a pointer offset in units of Element
    CUTLASS_HOST_DEVICE
    void add_pointer_offset(LongIndex pointer_offset) {
        iterator_.add_pointer_offset(pointer_offset);
    }

    /// Returns a pointer
    CUTLASS_HOST_DEVICE
    AccessType* get() const {
        return reinterpret_cast<AccessType*>(iterator_.get());
    }

    /// Adds a tile offset
    CUTLASS_DEVICE
    void add_tile_offset(TensorCoord const& coord) {
        iterator_.add_tile_offset({coord.row(), coord.column()});
    }

    /// Advances to the next tile in memory.
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator& operator++() {
        ++iterator_;
        return *this;
    }

    /// Advances to the next tile in memory.
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator operator++(int) {
        RegularTileAccessIterator prev(*this);
        ++iterator_;

        return prev;
    }
};

////////////////////////////////////////////////////////////////////////////////

/// Tile Iterator specialized for row-major congruous TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
///            ReadableContiguousTileIteratorConcept |
///            WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
          typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
        Shape_, Element_, layout::RowMajorTensorOpMultiplicandCongruous128b,
        AdvanceRank, ThreadMap_, Alignment> {
public:
    static_assert(
            AdvanceRank == 0 || AdvanceRank == 1,
            "Specialization for row-major iterator may advance along the "
            "columns(rank=0) or rows(rank=1) dimension.");

    using Shape = Shape_;
    using Element = Element_;
    using Layout = layout::RowMajorTensorOpMultiplicandCongruous128b;
    static int const kAdvanceRank = AdvanceRank;
    static int const kAlignment = Alignment;

    using Index = typename Layout::Index;
    using LongIndex = typename Layout::LongIndex;

    using TensorRef = TensorRef<Element, Layout>;
    using TensorCoord = typename Layout::TensorCoord;

    using ThreadMap = ThreadMap_;

    /// Underlying iterator type
    using UnderlyingIterator = RegularTileAccessIterator<
            layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
            layout::TensorOpMultiplicandCongruous128b,
            (kAdvanceRank == 0 ? 1 : 0), ThreadMap_>;

    using AccessType = typename UnderlyingIterator::AccessType;

private:
    /// Underlying iterator
    UnderlyingIterator iterator_;

public:
    /// Construct a TileIterator with zero threadblock offset
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator(
            TensorRef ref,  ///< Pointer to start of tensor
            int thread_id   ///< ID of each participating thread
            )
            : iterator_({ref.data(), ref.stride()}, thread_id) {}

    /// Overrides the internal iteration index
    CUTLASS_HOST_DEVICE
    void set_iteration_index(int index) {
        iterator_.set_iteration_index(index);
    }

    /// Adds a pointer offset in units of Element
    CUTLASS_HOST_DEVICE
    void add_pointer_offset(LongIndex pointer_offset) {
        iterator_.add_pointer_offset(pointer_offset);
    }

    /// Returns a pointer
    CUTLASS_HOST_DEVICE
    AccessType* get() const {
        return reinterpret_cast<AccessType*>(iterator_.get());
    }

    /// Adds a tile offset
    CUTLASS_DEVICE
    void add_tile_offset(TensorCoord const& coord) {
        iterator_.add_tile_offset({coord.column(), coord.row()});
    }

    /// Advances to the next tile in memory.
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator& operator++() {
        ++iterator_;
        return *this;
    }

    /// Advances to the next tile in memory.
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator operator++(int) {
        RegularTileAccessIterator prev(*this);
        ++iterator_;

        return prev;
    }
};

/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////

/// Tile iterator specialized for crosswise arrangements for TensorOps
///
///
/// Satisfies: ForwardTileIteratorConcept |
///            ReadableContiguousTileIteratorConcept |
///            WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
          typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<Shape_, Element_,
                                layout::TensorOpMultiplicandCrosswise128x4,
                                AdvanceRank, ThreadMap_, Alignment> {
public:
    static_assert(AdvanceRank == 0 || AdvanceRank == 1,
                  "Specialization for pitch-linear iterator may advance along "
                  "the contiguous(rank=0) or strided(rank=1) dimension.");

    using Shape = Shape_;
    using Element = Element_;
    using Layout = layout::TensorOpMultiplicandCrosswise128x4;
    static int const kAdvanceRank = AdvanceRank;
    static int const kAlignment = Alignment;

    using Index = typename Layout::Index;
    using LongIndex = typename Layout::LongIndex;

    using TensorRef = TensorRef<Element, Layout>;
    using TensorCoord = typename Layout::TensorCoord;

    using ThreadMap = ThreadMap_;

    static_assert(ThreadMap::kThreads / 32 > 1,
                  "This tile iterator requires at least two warps.");

    /// Internal details made public to facilitate introspection
    struct Detail {
        /// This iterator is specialized for an access size that is 128 bits in
        /// length.
        static int const kAccessSizeInBits = 128;

        static_assert(
                sizeof_bits<Element_>::value * ThreadMap::kElementsPerAccess ==
                        kAccessSizeInBits,
                "This iterator requires a policy whose access size is 128b");

        ///< Number of pointers
        static int const kPointerCount = 1;
    };

    static_assert(!(ThreadMap::Iterations::kStrided % 2),
                  "This iterator requires an even number of iterations along "
                  "the strided dimension");

    /// Element type per access
    using AccessType = Array<Element, Layout::kElementsPerAccess>;

private:
    //
    // Data members
    //

    /// Stride value
    Index stride_;

    /// Internal pointer to first access of tile
    AccessType* pointer_;

    /// Internal byte offset
    Index byte_offset_;

    /// Iteration in the contiguous dimension
    int iteration_contiguous_;

    /// Iteration in the strided dimension
    int iteration_strided_;

public:
    /// Construct a TileIterator with zero threadblock offset
    CUTLASS_DEVICE
    RegularTileAccessIterator(
            TensorRef ref,  ///< Pointer to start of tensor
            int thread_id   ///< ID of each participating thread
            )
            : stride_(ref.stride(0) / Layout::kElementsPerAccess),
              byte_offset_(0) {
        layout::PitchLinearCoord thread_offset_base =
                ThreadMap::initial_offset(thread_id);

        // This is the offset of a thread within a threadblock tile for a
        // specific pointer (units of elements)
        layout::PitchLinearCoord thread_offset_in_threadblock_tile =
                thread_offset_base;

        // initialize pointer
        pointer_ = reinterpret_cast<AccessType*>(
                ref.data() + ref.offset(thread_offset_in_threadblock_tile));

        set_iteration_index(0);
    }

    /// Overrides the internal iteration index
    CUTLASS_HOST_DEVICE
    void set_iteration_index(int index) {
        iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous;
        iteration_strided_ = index / ThreadMap::Iterations::kContiguous;
    }

    /// Adds a pointer offset in units of Element
    CUTLASS_HOST_DEVICE
    void add_pointer_offset(LongIndex pointer_offset) {
        byte_offset_ += pointer_offset * sizeof(Element);
    }

    /// Returns a pointer
    CUTLASS_HOST_DEVICE
    AccessType* get() const {
        AccessType* access_ptr = pointer_;

        int offset_c = (iteration_contiguous_ * ThreadMap::Delta::kContiguous +
                        (iteration_strided_ & 1) * 2);
        int offset_s = (iteration_strided_ / 2) * 8;

        int access_offset = offset_c * stride_ + offset_s;

        char* access_byte_ptr =
                reinterpret_cast<char*>(access_ptr + access_offset);

        return reinterpret_cast<AccessType*>(access_byte_ptr + byte_offset_);
    }

    /// Advances to the next tile in memory.
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator& operator++() {
        ++iteration_contiguous_;

        if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous)
            return *this;

        // Enter here only if (iteration_contiguous_ ==
        // ThreadMap::Iterations::kContiguous)
        iteration_contiguous_ = 0;
        ++iteration_strided_;

        if (iteration_strided_ < ThreadMap::Iterations::kStrided) {
            return *this;
        }

        // Enter here only if (iteration_strided_ ==
        // ThreadMap::Iterations::kStrided), which means we enter the next
        // tile.
        iteration_strided_ = 0;

        return *this;
    }

    /// Advances to the next tile in memory.
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator operator++(int) {
        RegularTileAccessIterator prev(*this);

        this->operator++();

        return prev;
    }

    /// Adds a tile offset
    CUTLASS_DEVICE
    void add_tile_offset(TensorCoord const& coord) {
        add_pointer_offset(coord.contiguous() * Shape::kContiguous * stride_ +
                           coord.strided() * Shape::kStrided *
                                   Layout::kElementsPerAccess);
    }
};
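
// Illustrative decomposition of get() above: bit 0 of the strided iteration
// index shifts the contiguous access by two, and each pair of strided
// iterations advances the strided offset by eight accesses:
//
//   iteration_strided_       : 0  1  2  3
//   (s & 1) * 2 (contiguous) : 0  2  0  2
//   (s / 2) * 8 (strided)    : 0  0  8  8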

////////////////////////////////////////////////////////////////////////////////

/// Tile Iterator specialized for column-major crosswise TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
///            ReadableContiguousTileIteratorConcept |
///            WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
          typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
        Shape_, Element_, layout::ColumnMajorTensorOpMultiplicandCrosswise128x4,
        AdvanceRank, ThreadMap_, Alignment> {
public:
    static_assert(AdvanceRank == 0 || AdvanceRank == 1,
                  "Specialization for column-major iterator may advance along "
                  "the columns(rank=0) or rows(rank=1) dimension.");

    using Shape = Shape_;
    using Element = Element_;
    using Layout = layout::ColumnMajorTensorOpMultiplicandCrosswise128x4;
    static int const kAdvanceRank = AdvanceRank;
    static int const kAlignment = Alignment;

    using Index = typename Layout::Index;
    using LongIndex = typename Layout::LongIndex;

    using TensorRef = TensorRef<Element, Layout>;
    using TensorCoord = typename Layout::TensorCoord;

    using ThreadMap = ThreadMap_;

    /// Underlying iterator type
    using UnderlyingIterator = RegularTileAccessIterator<
            layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, Element,
            layout::TensorOpMultiplicandCrosswise128x4,
            (kAdvanceRank == 0 ? 0 : 1), ThreadMap_>;

    using AccessType = typename UnderlyingIterator::AccessType;

private:
    /// Underlying iterator
    UnderlyingIterator iterator_;

public:
    /// Construct a TileIterator with zero threadblock offset
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator(
            TensorRef ref,  ///< Pointer to start of tensor
            int thread_id   ///< ID of each participating thread
            )
            : iterator_({ref.data(), ref.stride()}, thread_id) {}

    /// Overrides the internal iteration index
    CUTLASS_HOST_DEVICE
    void set_iteration_index(int index) {
        iterator_.set_iteration_index(index);
    }

    /// Adds a pointer offset in units of Element
    CUTLASS_HOST_DEVICE
    void add_pointer_offset(LongIndex pointer_offset) {
        iterator_.add_pointer_offset(pointer_offset);
    }

    /// Returns a pointer
    CUTLASS_HOST_DEVICE
    AccessType* get() const {
        return reinterpret_cast<AccessType*>(iterator_.get());
    }

    /// Adds a tile offset
    CUTLASS_DEVICE
    void add_tile_offset(TensorCoord const& coord) {
        iterator_.add_tile_offset({coord.row(), coord.column()});
    }

    /// Advances to the next tile in memory.
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator& operator++() {
        ++iterator_;
        return *this;
    }

    /// Advances to the next tile in memory.
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator operator++(int) {
        RegularTileAccessIterator prev(*this);
        ++iterator_;

        return prev;
    }
};

////////////////////////////////////////////////////////////////////////////////

/// Tile Iterator specialized for row-major crosswise TensorOp formats.
///
///
/// Satisfies: ForwardTileIteratorConcept |
///            ReadableContiguousTileIteratorConcept |
///            WriteableContiguousTileIteratorConcept
///
template <typename Shape_, typename Element_, int AdvanceRank,
          typename ThreadMap_, int Alignment>
class RegularTileAccessIterator<
        Shape_, Element_, layout::RowMajorTensorOpMultiplicandCrosswise128x4,
        AdvanceRank, ThreadMap_, Alignment> {
public:
    static_assert(
            AdvanceRank == 0 || AdvanceRank == 1,
            "Specialization for row-major iterator may advance along the "
            "columns(rank=0) or rows(rank=1) dimension.");

    using Shape = Shape_;
    using Element = Element_;
    using Layout = layout::RowMajorTensorOpMultiplicandCrosswise128x4;
    static int const kAdvanceRank = AdvanceRank;
    static int const kAlignment = Alignment;

    using Index = typename Layout::Index;
    using LongIndex = typename Layout::LongIndex;

    using TensorRef = TensorRef<Element, Layout>;
    using TensorCoord = typename Layout::TensorCoord;

    using ThreadMap = ThreadMap_;

    /// Underlying iterator type
    using UnderlyingIterator = RegularTileAccessIterator<
            layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
            layout::TensorOpMultiplicandCrosswise128x4,
            (kAdvanceRank == 0 ? 1 : 0), ThreadMap_>;

    using AccessType = typename UnderlyingIterator::AccessType;

private:
    /// Underlying iterator
    UnderlyingIterator iterator_;

public:
    /// Construct a TileIterator with zero threadblock offset
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator(
            TensorRef ref,  ///< Pointer to start of tensor
            int thread_id   ///< ID of each participating thread
            )
            : iterator_({ref.data(), ref.stride()}, thread_id) {}

    /// Overrides the internal iteration index
    CUTLASS_HOST_DEVICE
    void set_iteration_index(int index) {
        iterator_.set_iteration_index(index);
    }

    /// Adds a pointer offset in units of Element
    CUTLASS_HOST_DEVICE
    void add_pointer_offset(LongIndex pointer_offset) {
        iterator_.add_pointer_offset(pointer_offset);
    }

    /// Returns a pointer
    CUTLASS_HOST_DEVICE
    AccessType* get() const {
        return reinterpret_cast<AccessType*>(iterator_.get());
    }

    /// Adds a tile offset
    CUTLASS_DEVICE
    void add_tile_offset(TensorCoord const& coord) {
        iterator_.add_tile_offset({coord.column(), coord.row()});
    }

    /// Advances to the next tile in memory.
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator& operator++() {
        ++iterator_;
        return *this;
    }

    /// Advances to the next tile in memory.
    CUTLASS_HOST_DEVICE
    RegularTileAccessIterator operator++(int) {
        RegularTileAccessIterator prev(*this);
        ++iterator_;

        return prev;
    }
};

/////////////////////////////////////////////////////////////////////////////////////////////////

}  // namespace threadblock
}  // namespace transform
}  // namespace cutlass

/////////////////////////////////////////////////////////////////////////////////////////////////
