#pragma once

#include <algorithm>
#include <atomic>
#include <cstddef>
#include <iostream>
#include <mutex>
#include <new>
#include <stdexcept>
#include <utility>
#include <vector>

#include "thread_safe_arena.h"

namespace good_cptl
{

// Bounded lock-free multi-producer/multi-consumer queue (Vyukov-style).
// Each slot carries a `cycle` ticket; producers/consumers claim tickets from
// the monotonically increasing tail_/head_ counters and match them against
// the slot's cycle to know whether the slot is writable/readable.
template<typename T, size_t Capacity>
class MPMCQueue
{
  // Capacity is a compile-time constant, so validate it at compile time
  // (the old runtime throw could never fire for a valid instantiation).
  static_assert(Capacity >= 1 && (Capacity & (Capacity - 1)) == 0,
                "Capacity must be a power of two");
private:
  MPMCQueue(const MPMCQueue& queue) = delete;
  MPMCQueue& operator=(const MPMCQueue& queue) = delete;
  MPMCQueue(MPMCQueue&& queue) = delete;
private:
  // One ring slot; alignas(64) keeps adjacent slots on separate cache lines
  // to reduce false sharing between concurrent producers/consumers.
  struct alignas(64) Slot
  {
    std::atomic<size_t> cycle;  // ticket: == index -> writable, == index+1 -> readable
    T data;
  };
  // Ring buffer of Capacity slots.
  std::vector<Slot> buffer_;
  // Producer and consumer tickets, each on its own cache line.
  alignas(64) std::atomic<size_t> tail_{0};
  alignas(64) std::atomic<size_t> head_{0};
public:
  // Approximate element count (a racy snapshot; exact only when no other
  // thread is concurrently enqueueing/dequeueing).
  size_t size() const
  {
    const size_t head = head_.load(std::memory_order_acquire);
    const size_t tail = tail_.load(std::memory_order_relaxed);
    // head_/tail_ are unbounded tickets, so the difference IS the count.
    // (The previous `% Capacity` incorrectly reported 0 for a full queue.)
    return tail - head;
  }
  bool empty() const
  {
    // size() is now const, so this compiles (it previously called a
    // non-const member from a const one).
    return size() == 0;
  }
public:
  MPMCQueue() : buffer_(Capacity)
  {
    // Slot i is initially writable by the producer holding ticket i.
    for (size_t i = 0; i < Capacity; ++i) {
      buffer_[i].cycle.store(i, std::memory_order_relaxed);
    }
  }

  // Enqueue by copy (multi-producer safe). Returns false when full.
  bool enqueue(const T& value)
  {
    size_t tail = tail_.load(std::memory_order_relaxed);
    while (true) {
      Slot& slot = buffer_[tail & (Capacity - 1)];
      const size_t cycle = slot.cycle.load(std::memory_order_acquire);
      // std::ptrdiff_t replaces non-standard POSIX ssize_t.
      const auto diff = static_cast<std::ptrdiff_t>(cycle - tail);

      if (diff == 0) {
        // Slot is free for ticket `tail`; try to claim the ticket.
        if (tail_.compare_exchange_weak(tail, tail + 1, std::memory_order_relaxed, std::memory_order_relaxed)) {
          slot.data = value;
          // Publish: cycle == ticket + 1 marks the slot readable.
          slot.cycle.store(tail + 1, std::memory_order_release);
          return true;
        }
        // CAS failure already refreshed `tail`; loop and retry.
      } else if (diff < 0) {
        // Slot still holds data from the previous lap: queue is full.
        return false;
      } else {
        // Another producer moved past us; catch up with the shared ticket.
        tail = tail_.load(std::memory_order_relaxed);
      }
    }
  }

  // Enqueue by move (multi-producer safe). Returns false when full.
  bool enqueue(T&& value)
  {
    size_t tail = tail_.load(std::memory_order_relaxed);
    while (true) {
      Slot& slot = buffer_[tail & (Capacity - 1)];
      const size_t cycle = slot.cycle.load(std::memory_order_acquire);
      const auto diff = static_cast<std::ptrdiff_t>(cycle - tail);

      // Check whether the slot is free for this ticket.
      if (diff == 0) {
        if (tail_.compare_exchange_weak(tail, tail + 1, std::memory_order_relaxed, std::memory_order_relaxed)) {
          slot.data = std::move(value);
          slot.cycle.store(tail + 1, std::memory_order_release);
          return true;
        }
      } else if (diff < 0) {
        return false;  // full
      } else {
        tail = tail_.load(std::memory_order_relaxed);
      }
    }
  }

  // Dequeue (multi-consumer safe). Returns false when empty.
  bool dequeue(T& value)
  {
    size_t head = head_.load(std::memory_order_relaxed);
    while (true) {
      Slot& slot = buffer_[head & (Capacity - 1)];
      const size_t cycle = slot.cycle.load(std::memory_order_acquire);

      // Slot is readable when its cycle is one past the consumer ticket.
      if (cycle == head + 1) {
        if (head_.compare_exchange_weak(head, head + 1, std::memory_order_relaxed, std::memory_order_relaxed)) {
          value = std::move(slot.data);
          // Recycle the slot for the producer one lap ahead.
          slot.cycle.store(head + Capacity, std::memory_order_release);
          return true;
        }
      } else if (static_cast<std::ptrdiff_t>(cycle - head - 1) < 0) {
        return false;  // nothing published yet: queue is empty
      } else {
        head = head_.load(std::memory_order_relaxed);
      }
    }
    // (unreachable `return false` after the infinite loop removed)
  }
};

// Unbounded MPMC queue built from a linked list of fixed-capacity segments.
// When the tail segment fills up, a new (doubled) segment is appended under
// expand_mutex_. Memory comes from a project arena (SysThreadSafeArena).
template<typename T>
class DynamicMPMCQueue
{
public:
  // One queue cell: `cycle` is the ticket, `data` the payload.
  struct Block
  {
    std::atomic<size_t> cycle;
    T data;
    Block() : cycle(0) {}
  };

  // A fixed-capacity ring segment; segments are chained via `next`.
  struct Segment
  {
    const size_t capacity;           // power-of-two block count
    const size_t pre_acclume_index;  // producer ticket when this segment was created
    Block* block;                    // arena-backed array of `capacity` blocks
    std::atomic<Segment*> next{nullptr};
    SysThreadSafeArena& alloc_;

    // `init_tail` seeds the blocks' cycles so that the first ticket landing
    // in this segment matches block[0].
    Segment(size_t cap,
            size_t init_tail,
            SysThreadSafeArena& alloc)
      : capacity(cap)
      , pre_acclume_index(init_tail)
      , alloc_(alloc) {
      block = reinterpret_cast<Block*>(alloc_.allocate(cap * sizeof(Block)));
      if (block == nullptr) {
        throw std::bad_alloc();
      }
      // NOTE(review): the raw arena storage is reinterpreted as Block without
      // running Block/T constructors; only `cycle` is written below. Confirm
      // T is trivially constructible, or placement-new each Block here.
      for (size_t i = init_tail; i < cap + init_tail; i++) {
        block[i - init_tail].cycle.store(i, std::memory_order_relaxed);
      }
    }

    // Returns the block array to the arena.
    ~Segment() { alloc_.deallocate(reinterpret_cast<char*>(block), capacity * sizeof(Block)); }

    // Map a (masked) global index into this segment's block array.
    size_t get_inline_block_index(size_t index)
    {
      if (index >= capacity) {
        index = (index & (capacity - 1));
      }
      // NOTE(review): this subtraction wraps when the masked index is smaller
      // than pre_acclume_index — verify against the callers' masking of the
      // ticket with (capacity - 1).
      return index - pre_acclume_index;
    }
  };

  alignas(64) std::atomic<size_t> head_{0};      // consumer ticket
  alignas(64) std::atomic<size_t> tail_{0};      // producer ticket
  alignas(64) std::atomic<Segment*> head_segment_{nullptr};
  alignas(64) std::atomic<Segment*> tail_segment_{nullptr};
  size_t initial_capacity_;

  // Mutex used only to serialize segment expansion.
  std::mutex expand_mutex_;
  SysThreadSafeArena& alloc_;
public:
  // @throws std::invalid_argument if initial_capacity is not a power of two.
  // @throws std::bad_alloc if the arena cannot supply the first segment.
  explicit DynamicMPMCQueue(size_t initial_capacity,
                           SysThreadSafeArena& alloc)
  : initial_capacity_(initial_capacity)
  , alloc_(alloc)
  {
    if (initial_capacity < 1 || (initial_capacity & (initial_capacity - 1)) != 0) {
      throw std::invalid_argument("initial_capacity must be a power of 2");
    }
    void* mem = alloc_.allocate(sizeof(Segment));
    Segment* seg = new (mem) Segment(initial_capacity, 0, alloc_);
    head_segment_.store(seg, std::memory_order_relaxed);
    tail_segment_.store(seg, std::memory_order_relaxed);
  }

  ~DynamicMPMCQueue()
  {
    Segment* seg = head_segment_.load(std::memory_order_relaxed);
    while (seg) {
      Segment* next = seg->next.load(std::memory_order_relaxed);
      // Run the destructor before returning the memory: ~Segment() releases
      // the Block array (previously leaked — deallocate alone skipped it).
      seg->~Segment();
      alloc_.deallocate(reinterpret_cast<char*>(seg), sizeof(Segment));
      seg = next;
    }
  }

  // Enqueue by copy; grows the queue when the current tail segment is full.
  bool enqueue(const T& value)
  {
    size_t tail = tail_.fetch_add(1, std::memory_order_relaxed);
    Segment* seg = tail_segment_.load(std::memory_order_acquire);
    // NOTE(review): the ticket obtained by fetch_add is never returned on a
    // failed enqueue, and the retry path below re-reads tail_ without a new
    // fetch_add — confirm the intended ticket protocol under contention.
    while (true) {
      const size_t index = seg->get_inline_block_index(tail & (seg->capacity - 1));
      Block& block = seg->block[index];
      const size_t cycle = block.cycle.load(std::memory_order_acquire);
      if (cycle == tail) {
        block.data = value;
        block.cycle.store(tail + 1, std::memory_order_release);  // publish
        return true;
      } else if (static_cast<std::ptrdiff_t>(cycle - tail) < 0) {
        // Segment is full: grow it (or learn that another thread already did).
        if (!expand_queue(tail, seg)) {
          return false; // could not expand
        }
        seg = tail_segment_.load(std::memory_order_acquire);
      } else {
        tail = tail_.load(std::memory_order_relaxed);
      }
    }
  }

  // Dequeue; follows the segment chain and retires drained segments.
  bool dequeue(T& item)
  {
    size_t head = head_.fetch_add(1, std::memory_order_relaxed);
    Segment* seg = head_segment_.load(std::memory_order_acquire);
    while (true) {
      const size_t index = seg->get_inline_block_index(head & (seg->capacity - 1));
      Block& block = seg->block[index];
      const size_t cycle = block.cycle.load(std::memory_order_acquire);
      if (cycle == head + 1) {
        item = std::move(block.data);
        block.cycle.store(head + seg->capacity, std::memory_order_release);
        return true;
      } else if (static_cast<std::ptrdiff_t>(cycle - head - 1) < 0) {
        Segment* next_seg = seg->next.load(std::memory_order_acquire);
        if (!next_seg) {
          return false;  // no successor segment: queue is empty
        }
        // Retire the drained segment.
        // NOTE(review): other consumers may still hold `seg`; freeing it here
        // can be a use-after-free under contention — consider deferred reclaim.
        head_segment_.store(next_seg, std::memory_order_release);
        seg->~Segment();  // was missing: releases the Block array
        // Cast to char* to match every other deallocate call site (the old
        // code passed the raw Segment* here).
        alloc_.deallocate(reinterpret_cast<char*>(seg), sizeof(Segment));
        seg = next_seg;
      } else {
        head = head_.load(std::memory_order_relaxed);
      }
    }
  }

  Segment* get_head_segment() { return head_segment_.load(std::memory_order_relaxed);}

  // Approximate count: tickets issued minus tickets consumed (racy snapshot).
  size_t size() { return tail_ - head_;}
  bool empty() { return size() == 0; }
private:
  // Append a new, larger segment after `old_seg`.
  // Returns true when a fresh segment is available — either created here or
  // by a racing thread — so the caller must reload tail_segment_ and retry.
  bool expand_queue(size_t tail, Segment* old_seg)
  {
    std::lock_guard<std::mutex> lock(expand_mutex_);
    // Double-checked under the lock: another thread may have expanded first.
    if (tail_segment_.load(std::memory_order_relaxed) != old_seg) {
      // Expansion already happened — report success so the caller retries
      // against the new segment (the old code returned false here, making
      // enqueue fail spuriously even though capacity was available).
      return true;
    }

    const size_t new_capacity = compute_new_size(old_seg->capacity);
    void* mem = alloc_.allocate(sizeof(Segment));
    Segment* new_seg = new (mem) Segment(new_capacity, tail, alloc_);

    old_seg->next.store(new_seg, std::memory_order_release);
    tail_segment_.store(new_seg, std::memory_order_release);
    return true;
  }

  // Double the capacity, clamped to the maximum segment size; the result
  // stays a power of two because the input is one.
  size_t compute_new_size(const size_t cap) const
  {
    size_t new_size = cap * 2;
    return std::min(max_segment_size(), new_size);
  }

  // Upper bound on elements per segment.
  size_t max_segment_size() const
  {
    return 4096 * 4096;
  }
};
}