#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <memory>
#include <new>

using uint32_t = unsigned int;
using uint8_t = unsigned char;

// Single-producer / multi-consumer broadcast queue.
//
// Each block carries its own seqlock-style version counter:
//   odd  => a write is in progress (contents unstable)
//   even => the last write completed (contents stable)
// Versions start at 0 and are bumped twice per write, so a published block
// always has version >= 2.  Readers are gated by mBlockCounter, so they never
// inspect a block that has not been written at least once.
//
// Reader semantics: every SPMCQueue instance keeps its own read cursor
// (mLastReadIndex), so each reader instance observes the FULL message stream
// (broadcast).  A single instance must only be polled by one thread at a time.
template <typename T> class SPMCQueue {
private:
  using BlockVersion = uint32_t;
  using MessageSize = uint32_t;

  // One slot of the ring.
  struct Block {
    alignas(64) std::atomic<BlockVersion> mVersion; // seqlock counter, own cache line
    std::atomic<MessageSize> mSize;                 // payload byte count
    alignas(8) uint8_t mData[0];                    // flexible payload (GCC/Clang extension)

    Block() : mVersion(0), mSize(0) {}
  };

  // Shared queue header; each hot counter sits on its own cache line to avoid
  // false sharing between the producer and the consumers.
  struct Header {
    alignas(64) std::atomic<uint64_t> mWriteIndex;   // next slot the producer claims
    alignas(64) std::atomic<uint64_t> mPendingIndex; // last index a reader released
    alignas(64) std::atomic<uint64_t> mBlockCounter; // count of completed writes (publish point)
    alignas(64) Block mBlocks[0];                    // flexible array of blocks

    Header() : mWriteIndex(0), mPendingIndex(0), mBlockCounter(0) {}
  };

  Header *mHeader;         // owning pointer to the allocated region
  uint64_t mCapacity;      // number of blocks; power of two
  uint64_t mBlockSize;     // bytes per block, including the Block header
  uint64_t mLastReadIndex; // this reader instance's cursor (not shared)

  // Map a monotonically increasing linear index onto the ring.
  // Valid because mCapacity is asserted to be a power of two.
  uint64_t getBlockIndex(uint64_t index) const {
    return index & (mCapacity - 1);
  }

  // Address of the block the given linear index maps to.
  Block *getBlock(uint64_t index) const {
    return reinterpret_cast<Block *>(
        reinterpret_cast<uint8_t *>(mHeader->mBlocks) +
        getBlockIndex(index) * mBlockSize);
  }

public:
  /// @param capacity  number of blocks; must be a power of two.
  /// @param blockSize bytes per block (header + payload); must be a multiple
  ///                  of 64 so every block stays cache-line aligned.
  SPMCQueue(uint64_t capacity, uint64_t blockSize = 1024)
      : mCapacity(capacity), mBlockSize(blockSize), mLastReadIndex(0) {

    assert(capacity > 0 && (capacity & (capacity - 1)) == 0); // power of 2
    assert(blockSize % 64 == 0 && blockSize > sizeof(Block)); // keeps blocks aligned

    // aligned_alloc requires the size to be a multiple of the alignment;
    // round up so a non-multiple blockSize/capacity combination still works.
    size_t totalSize = sizeof(Header) + capacity * blockSize;
    totalSize = (totalSize + 63) & ~static_cast<size_t>(63);
    void *memory = aligned_alloc(64, totalSize);
    assert(memory != nullptr);
    mHeader = new (memory) Header();

    // Construct every block in place (versions start at 0 = "never written").
    for (uint64_t i = 0; i < capacity; ++i) {
      new (getBlock(i)) Block();
    }
  }

  // Owns raw memory: copying would double-free.
  SPMCQueue(const SPMCQueue &) = delete;
  SPMCQueue &operator=(const SPMCQueue &) = delete;

  // All members are trivially destructible, so freeing the region suffices.
  ~SPMCQueue() { std::free(mHeader); }

  /// Producer side (single producer only).  Invokes
  /// writeCallback(dataPtr, size) to fill the claimed block.
  /// @return false if the payload is empty / too large or the ring is full.
  template <typename Callback>
  bool push(MessageSize size, Callback &&writeCallback) {
    // Reject size 0 as well: pop() treats it as "no data", so a zero-sized
    // message would be published but never consumed, stalling readers.
    if (size == 0 || size > mBlockSize - sizeof(Block)) {
      return false;
    }

    uint64_t writeIndex = mHeader->mWriteIndex.load(std::memory_order_relaxed);
    uint64_t nextIndex = writeIndex + 1;

    // Full check (keeps one slot of slack, matching full() below) so the
    // slot a reader is about to consume is not immediately overwritten.
    uint64_t pendingIndex =
        mHeader->mPendingIndex.load(std::memory_order_acquire);
    if (nextIndex - pendingIndex >= mCapacity) {
      return false; // Queue full
    }

    // Reserve the slot (single producer: a relaxed store is enough).
    mHeader->mWriteIndex.store(nextIndex, std::memory_order_relaxed);

    Block *block = getBlock(writeIndex);

    // Seqlock write protocol: bump to odd (write in progress), publish the
    // data, bump to even (write complete).  The previous code stored the
    // *same* version back as the in-progress marker and version+1 as
    // complete, so once a block was reused its "complete" version became
    // even and readers rejected it forever.
    BlockVersion version = block->mVersion.load(std::memory_order_relaxed);
    block->mVersion.store(version + 1, std::memory_order_relaxed); // odd
    // Order the odd marker before the data stores that follow.
    std::atomic_thread_fence(std::memory_order_release);

    block->mSize.store(size, std::memory_order_relaxed);
    writeCallback(block->mData, size);

    // Mark write complete (back to even, advanced by 2 from the last stable
    // value); release-pairs with the reader's acquire load.
    block->mVersion.store(version + 2, std::memory_order_release);

    // Publish to readers: they gate on this counter before touching a block.
    mHeader->mBlockCounter.fetch_add(1, std::memory_order_release);

    return true;
  }

  /// Convenience push for trivially copyable payloads.
  bool push(const T &data) {
    return push(sizeof(T), [&](uint8_t *buffer, MessageSize size) {
      std::memcpy(buffer, &data, size);
    });
  }

  /// Consumer side.  Invokes readCallback(dataPtr, size) on the next message.
  /// The bytes handed to the callback are only guaranteed stable when pop()
  /// returns true (a lapped reader may observe a torn read; it is detected
  /// and reported as false, but the callback has already run by then).
  /// @return true if a message was consumed and the cursor advanced.
  template <typename Callback> bool pop(Callback &&readCallback) {
    // Nothing new published yet?
    uint64_t blockCounter =
        mHeader->mBlockCounter.load(std::memory_order_acquire);
    if (mLastReadIndex >= blockCounter) {
      return false;
    }

    Block *block = getBlock(mLastReadIndex);

    // Seqlock read protocol: snapshot the version, read, then verify the
    // version did not change underneath us.
    BlockVersion v1 = block->mVersion.load(std::memory_order_acquire);
    if (v1 % 2 == 1) {
      return false; // Write in progress
    }

    MessageSize size = block->mSize.load(std::memory_order_relaxed);
    if (size == 0 || size > mBlockSize - sizeof(Block)) {
      return false; // Defensive: push() never publishes such a payload
    }

    readCallback(block->mData, size);

    // Re-check: if the producer lapped us mid-read, the bytes above were
    // unstable — reject and leave the cursor in place.
    std::atomic_thread_fence(std::memory_order_acquire);
    BlockVersion v2 = block->mVersion.load(std::memory_order_relaxed);
    if (v1 != v2) {
      return false; // Torn read detected
    }

    mLastReadIndex++;

    // Release the slot so the producer can reuse it.  NOTE: with several
    // reader instances these stores race; flow control is only meaningful
    // when exactly one instance updates mPendingIndex.
    mHeader->mPendingIndex.store(mLastReadIndex, std::memory_order_release);

    return true;
  }

  /// Convenience pop for trivially copyable payloads.  If the stored message
  /// size does not match sizeof(T), `data` is left untouched but the message
  /// is still consumed (the cursor advances) and true is returned.
  bool pop(T &data) {
    return pop([&](const uint8_t *buffer, MessageSize size) {
      if (size == sizeof(T)) {
        std::memcpy(&data, buffer, size);
      }
    });
  }

  /// Approximate number of unconsumed messages (racy by design).
  uint64_t size() const {
    uint64_t writeIndex = mHeader->mWriteIndex.load(std::memory_order_relaxed);
    uint64_t pendingIndex =
        mHeader->mPendingIndex.load(std::memory_order_relaxed);
    return writeIndex - pendingIndex;
  }

  /// True when no unconsumed messages remain (approximate).
  bool empty() const { return size() == 0; }

  /// True when push() would fail for lack of space.  Uses mCapacity - 1 to
  /// match push()'s one-slot-of-slack full condition (the old >= mCapacity
  /// test could report "not full" while push() was already rejecting).
  bool full() const { return size() >= mCapacity - 1; }
};

// Usage example
#include <iostream>
#include <thread>
#include <vector>

// Demonstrates one producer publishing ints to several polling consumers.
// NOTE(review): all consumer threads share the single `queue` object, so they
// race on its non-atomic per-reader cursor (mLastReadIndex) and on
// mPendingIndex — for correct operation each consumer should own its own
// reader cursor/instance.  Kept as-is to illustrate the API shape.
void example_usage() {
  const int QUEUE_SIZE = 1024; // Must be power of 2
  const int NUM_CONSUMERS = 4;
  const int NUM_MESSAGES = 10000;

  SPMCQueue<int> queue(QUEUE_SIZE);
  std::atomic<int> totalConsumed{0}; // sum of per-consumer counts
  std::atomic<bool> done{false};     // set once the producer has pushed everything

  // Producer thread: busy-spins (yielding) whenever the ring is full.
  std::thread producer([&]() {
    for (int i = 0; i < NUM_MESSAGES; ++i) {
      while (!queue.push(i)) {
        std::this_thread::yield();
      }
    }
    done.store(true);
  });

  // Consumer threads: poll until the producer is done AND the queue drains.
  std::vector<std::thread> consumers;
  for (int i = 0; i < NUM_CONSUMERS; ++i) {
    consumers.emplace_back([&, i]() {
      int value;
      int localCount = 0;

      // The `|| !queue.empty()` re-check prevents exiting with messages
      // still queued when `done` flips between the two condition loads.
      while (!done.load() || !queue.empty()) {
        if (queue.pop(value)) {
          localCount++;
          // Process value...
        } else {
          std::this_thread::yield();
        }
      }

      totalConsumed.fetch_add(localCount);
      std::cout << "Consumer " << i << " processed " << localCount
                << " messages\n";
    });
  }

  producer.join();
  for (auto &consumer : consumers) {
    consumer.join();
  }

  // Because of the shared-cursor race noted above, the consumed total is not
  // guaranteed to equal the produced total.
  std::cout << "Total produced: " << NUM_MESSAGES << std::endl;
  std::cout << "Total consumed: " << totalConsumed.load() << std::endl;
}

// Alternative V1-style implementation: no per-block versions, just a shared
// publish index.  Simpler, but only safe with a single producer and a single
// reader instance (each instance keeps its own read cursor), and it does not
// detect a producer lapping a slow reader.  T should be trivially copyable:
// slots are raw storage that is assigned without being constructed.
template <typename T> class SPMCQueueV1 {
private:
  // Shared header followed by the flexible element storage.
  struct alignas(64) Header {
    std::atomic<uint64_t> mIndex;        // count of fully published elements
    std::atomic<uint64_t> mPendingIndex; // count of consumed elements
    uint8_t mData[0];                    // flexible array (GCC/Clang extension)

    Header() : mIndex(0), mPendingIndex(0) {}
  };

  Header *mHeader;         // owning pointer to the allocated region
  uint64_t mCapacity;      // element count; power of two
  uint64_t mElementSize;   // sizeof(T)
  uint64_t mLastReadIndex; // this instance's read cursor

public:
  /// @param capacity number of elements; must be a power of two (the ring
  ///                 position is computed with `index & (capacity - 1)`).
  explicit SPMCQueueV1(uint64_t capacity)
      : mCapacity(capacity), mElementSize(sizeof(T)), mLastReadIndex(0) {

    assert(capacity > 0 && (capacity & (capacity - 1)) == 0); // power of 2

    // aligned_alloc requires a size that is a multiple of the alignment.
    size_t totalSize = sizeof(Header) + capacity * mElementSize;
    totalSize = (totalSize + 63) & ~static_cast<size_t>(63);
    void *memory = aligned_alloc(64, totalSize);
    assert(memory != nullptr);
    // Placement-new so the atomics are actually constructed (the original
    // cast-and-store used them on raw, unconstructed memory).
    mHeader = new (memory) Header();
  }

  // Owns raw memory: copying would double-free.
  SPMCQueueV1(const SPMCQueueV1 &) = delete;
  SPMCQueueV1 &operator=(const SPMCQueueV1 &) = delete;

  ~SPMCQueueV1() { std::free(mHeader); }

  /// Single-producer push.  The original fetch_add'ed the index *before* the
  /// full check and before writing the slot, so (a) a rejected push still
  /// consumed an index, permanently corrupting the ring, and (b) readers
  /// could observe the new index before the slot was written.  Fixed: check
  /// first, write the slot, publish the index last with release ordering.
  bool push(const T &item) {
    uint64_t index = mHeader->mIndex.load(std::memory_order_relaxed);

    // Full check before claiming anything.
    uint64_t pending = mHeader->mPendingIndex.load(std::memory_order_acquire);
    if (index - pending >= mCapacity) {
      return false;
    }

    uint64_t pos = index & (mCapacity - 1);
    T *slot = reinterpret_cast<T *>(mHeader->mData + pos * mElementSize);
    *slot = item;

    // Publish: a reader that sees the new index also sees the slot contents.
    mHeader->mIndex.store(index + 1, std::memory_order_release);

    return true;
  }

  /// Pop the next element into `item`.
  /// @return false when no unread element is available.
  bool pop(T &item) {
    uint64_t index = mHeader->mIndex.load(std::memory_order_acquire);
    if (mLastReadIndex >= index) {
      return false; // nothing new
    }

    uint64_t pos = mLastReadIndex & (mCapacity - 1);
    const T *slot =
        reinterpret_cast<const T *>(mHeader->mData + pos * mElementSize);
    item = *slot;

    mLastReadIndex++;
    // Release the slot so the producer's full check sees the space.
    mHeader->mPendingIndex.store(mLastReadIndex, std::memory_order_release);

    return true;
  }
};