/*!
 * \file lock_free_queue.hpp
 * \project AI_CTP
 * 
 * \brief 高性能无锁队列实现，专为CTP数据传输优化
 *        基于SPSC（单生产者单消费者）模型，适合CTP回调场景
 */
#pragma once

#include <algorithm>  // std::min (BatchQueue::try_pop_batch)
#include <atomic>
#include <cassert>
#include <cstddef>    // size_t
#include <cstring>
#include <memory>
#include <utility>    // std::move

namespace ctp_api {
namespace performance {

// 单生产者单消费者无锁队列（适合CTP回调场景）
template<typename T, size_t Capacity = 8192>
class SPSCQueue {
private:
    static constexpr size_t kCapacity = Capacity;
    static_assert((kCapacity & (kCapacity - 1)) == 0, "Capacity must be power of 2");
    
    struct alignas(64) {
        std::atomic<size_t> write_pos{0};
    };
    
    struct alignas(64) {
        std::atomic<size_t> read_pos{0};
    };
    
    alignas(64) T buffer_[kCapacity];
    
public:
    SPSCQueue() = default;
    ~SPSCQueue() = default;
    
    // 禁止拷贝和移动
    SPSCQueue(const SPSCQueue&) = delete;
    SPSCQueue& operator=(const SPSCQueue&) = delete;
    SPSCQueue(SPSCQueue&&) = delete;
    SPSCQueue& operator=(SPSCQueue&&) = delete;
    
    // 生产者接口（CTP回调线程调用）
    bool try_push(const T& item) noexcept {
        const size_t current_write = write_pos_.load(std::memory_order_relaxed);
        const size_t next_write = (current_write + 1) & (kCapacity - 1);
        
        if (next_write == read_pos_.load(std::memory_order_acquire)) {
            return false; // 队列满
        }
        
        buffer_[current_write] = item;
        write_pos_.store(next_write, std::memory_order_release);
        return true;
    }
    
    // 支持移动语义的push
    bool try_push(T&& item) noexcept {
        const size_t current_write = write_pos_.load(std::memory_order_relaxed);
        const size_t next_write = (current_write + 1) & (kCapacity - 1);
        
        if (next_write == read_pos_.load(std::memory_order_acquire)) {
            return false; // 队列满
        }
        
        buffer_[current_write] = std::move(item);
        write_pos_.store(next_write, std::memory_order_release);
        return true;
    }
    
    // 消费者接口（用户线程调用）
    bool try_pop(T& item) noexcept {
        const size_t current_read = read_pos_.load(std::memory_order_relaxed);
        if (current_read == write_pos_.load(std::memory_order_acquire)) {
            return false; // 队列空
        }
        
        item = std::move(buffer_[current_read]);
        read_pos_.store((current_read + 1) & (kCapacity - 1), std::memory_order_release);
        return true;
    }
    
    // 状态查询
    bool empty() const noexcept {
        return read_pos_.load(std::memory_order_acquire) == 
               write_pos_.load(std::memory_order_acquire);
    }
    
    size_t size() const noexcept {
        const size_t write = write_pos_.load(std::memory_order_acquire);
        const size_t read = read_pos_.load(std::memory_order_acquire);
        return (write - read) & (kCapacity - 1);
    }
    
    // 获取容量利用率（用于监控）
    double utilization() const noexcept {
        return static_cast<double>(size()) / kCapacity;
    }

private:
    alignas(64) std::atomic<size_t> write_pos_{0};
    alignas(64) std::atomic<size_t> read_pos_{0};
};

// 多生产者单消费者队列（支持多个数据源）
template<typename T, size_t Capacity = 8192>
class MPSCQueue {
private:
    static constexpr size_t kCapacity = Capacity;
    static_assert((kCapacity & (kCapacity - 1)) == 0, "Capacity must be power of 2");
    
    struct Node {
        std::atomic<Node*> next{nullptr};
        T data;
        
        Node() = default;
        explicit Node(const T& item) : data(item) {}
        explicit Node(T&& item) : data(std::move(item)) {}
    };
    
    alignas(64) std::atomic<Node*> head_;
    alignas(64) std::atomic<Node*> tail_;
    
public:
    MPSCQueue() {
        Node* dummy = new Node();
        head_.store(dummy);
        tail_.store(dummy);
    }
    
    ~MPSCQueue() {
        while (Node* old_head = head_.load()) {
            head_.store(old_head->next);
            delete old_head;
        }
    }
    
    // 多生产者接口
    void push(const T& item) {
        Node* new_node = new Node(item);
        Node* prev_tail = tail_.exchange(new_node);
        prev_tail->next.store(new_node);
    }
    
    void push(T&& item) {
        Node* new_node = new Node(std::move(item));
        Node* prev_tail = tail_.exchange(new_node);
        prev_tail->next.store(new_node);
    }
    
    // 单消费者接口
    bool try_pop(T& item) {
        Node* head = head_.load();
        Node* next = head->next.load();
        
        if (next == nullptr) {
            return false;
        }
        
        item = std::move(next->data);
        head_.store(next);
        delete head;
        return true;
    }
    
    bool empty() const {
        Node* head = head_.load();
        return head->next.load() == nullptr;
    }
};

// 批量处理队列（适合行情数据批量处理）
template<typename T, size_t BatchSize = 100, size_t Capacity = 8192>
class BatchQueue {
private:
    SPSCQueue<T, Capacity> queue_;
    
public:
    bool try_push(const T& item) noexcept {
        return queue_.try_push(item);
    }
    
    bool try_push(T&& item) noexcept {
        return queue_.try_push(std::move(item));
    }
    
    // 批量弹出，提高处理效率
    size_t try_pop_batch(T* items, size_t max_count) noexcept {
        size_t count = 0;
        const size_t target = std::min(max_count, BatchSize);
        
        while (count < target && queue_.try_pop(items[count])) {
            ++count;
        }
        
        return count;
    }
    
    bool empty() const noexcept {
        return queue_.empty();
    }
    
    size_t size() const noexcept {
        return queue_.size();
    }
    
    double utilization() const noexcept {
        return queue_.utilization();
    }
};

} // namespace performance
} // namespace ctp_api 