//
// Created by DELL on 2024/11/25.
//

/// https://github.com/cameron314/concurrentqueue
///
/// A Simple Lock-free Ring Buffer
/// https://kmdreko.github.io/posts/20191003/a-simple-lock-free-ring-buffer/
/// https://github.com/kmdreko/wilt-ring/blob/master/wilt-ring/ring.cpp
///
///

#include <atomic>
#include <barrier>
#include <iostream>
#include <vector>
#include <memory>
#include <stdexcept>
#include <thread>

/// Fixed-capacity single-producer / single-consumer (SPSC) ring buffer.
///
/// One slot is always left empty to distinguish "full" from "empty", so a
/// queue built with `size` slots holds at most `size - 1` elements.  The
/// relaxed load of an index by its owning thread, paired with acquire/release
/// on the other thread's index, is only correct for exactly one producer
/// thread and one consumer thread.
template <typename T>
class LockFreeCircularQueue {
private:
    std::vector<std::shared_ptr<T>> buffer;
    std::atomic<size_t> head;   // next slot to dequeue from (consumer-owned)
    std::atomic<size_t> tail;   // next slot to enqueue into (producer-owned)
    const size_t maxSize;

public:
    /// @param size number of slots; must be >= 2 (one slot stays unused).
    /// @throws std::invalid_argument if size < 2 — size 0 would otherwise
    ///         cause modulo-by-zero UB, size 1 a queue that is always full.
    explicit LockFreeCircularQueue(size_t size)
        : buffer(size), head(0), tail(0), maxSize(size) {
        if (size < 2) {
            throw std::invalid_argument("LockFreeCircularQueue requires size >= 2");
        }
    }

    /// Producer side: copy `data` into the queue.
    /// @return false if the queue is full.
    bool enqueue(const T& data) {
        size_t currentTail = tail.load(std::memory_order_relaxed);
        size_t nextTail = (currentTail + 1) % maxSize;

        if (nextTail == head.load(std::memory_order_acquire)) {
            return false;  // full: advancing would collide with the head slot
        }

        buffer[currentTail] = std::make_shared<T>(data);
        // Release so the consumer's acquire-load of `tail` observes the
        // slot write above before it reads the slot.
        tail.store(nextTail, std::memory_order_release);
        return true;
    }

    /// Consumer side: remove and return the oldest element.
    /// @return empty shared_ptr if the queue is empty.
    std::shared_ptr<T> dequeue() {
        size_t currentHead = head.load(std::memory_order_relaxed);

        if (currentHead == tail.load(std::memory_order_acquire)) {
            return std::shared_ptr<T>();  // empty
        }

        // Move out so the slot does not keep the element alive until the
        // ring wraps around and overwrites it (fixes a delayed destruction).
        auto data = std::move(buffer[currentHead]);
        head.store((currentHead + 1) % maxSize, std::memory_order_release);
        return data;
    }
};


/// Unbounded linked-list queue guarded by a dummy node.
///
/// Thread-safety: at most ONE producer and ONE consumer.  enqueue writes
/// `old_tail->next` with a plain (non-atomic) store, so concurrent producers
/// would race; `head` is touched only by the consumer, `tail` by the producer.
///
/// Fixes: dequeue() used to return the *dummy* node's default-constructed
/// value (lagging every real element by one) and never freed the detached
/// node (memory leak).  The real value lives in head->next.
template <typename T>
class LockFreeQueue {
    struct Node {
        std::shared_ptr<T> data;
        Node* next;

        Node() : next(nullptr) {}                // dummy node, no payload
        explicit Node(T const& data_):
            data(std::make_shared<T>(data_)),
            next(nullptr) {}
    };

    std::atomic<Node*> head;  // dummy; first real element is head->next
    std::atomic<Node*> tail;  // last node (== head when empty)

public:
    LockFreeQueue() : head(new Node()), tail(head.load()) {}

    // Owning raw node pointers: copying would double-delete.
    LockFreeQueue(const LockFreeQueue&) = delete;
    LockFreeQueue& operator=(const LockFreeQueue&) = delete;

    ~LockFreeQueue() {
        while (Node* old_head = head.load()) {
            head.store(old_head->next);
            delete old_head;
        }
    }

    /// Producer side: append a copy of `data`.
    void enqueue(T const& data) {
        Node* new_node = new Node(data);
        Node* old_tail = tail.load();
        old_tail->next = new_node;   // link first ...
        tail.store(new_node);        // ... then publish the new tail
    }

    /// Consumer side: detach and return the oldest element.
    /// @return empty shared_ptr if the queue is empty.
    std::shared_ptr<T> dequeue() {
        Node* old_head = head.load();
        if (old_head == tail.load()) {
            return std::shared_ptr<T>(); // empty: only the dummy remains
        }
        Node* next = old_head->next;     // holds the oldest real value
        auto data = next->data;
        next->data.reset();              // `next` becomes the new dummy
        head.store(next);
        delete old_head;                 // fix: was leaked before
        return data;
    }
};


/// https://jbseg.medium.com/lock-free-queues-e48de693654b
template<typename T>
class lock_free_queue
{
private:
    struct node
    {
        std::shared_ptr<T> data;
        std::atomic<node*> next;
        node(T const& data_):
            data(std::make_shared<T>(data_))
        {}
    };
    std::atomic<node*> head;
    std::atomic<node*> tail;
public:
    void push(T const& data)
    {
        std::atomic<node*> const new_node=new node(data);
        node* old_tail = tail.load();
        while(!old_tail->next.compare_exchange_weak(nullptr, new_node)){
            node* old_tail = tail.load();
        }
        tail.compare_exchange_weak(old_tail, new_node);
    }
    std::shared_ptr<T> pop()
    {
        node* old_head=head.load();
        while(old_head && !head.compare_exchange_weak(old_head,old_head->next)){
            old_head=head.load();
        }
        return old_head ? old_head->data : std::shared_ptr<T>();
    }
};



/// Linked queue of ints with no dummy node (empty <=> head == nullptr).
///
/// Thread-safety: enqueue is safe for multiple producers; dequeue assumes a
/// single consumer running without concurrent producers (nodes are deleted
/// eagerly, which would be a use-after-free under concurrent pops).
///
/// Fixes: enqueue's CAS used `old` (the tail pointer) as the expected value
/// of `old->next` — that never matches, so a second enqueue spun forever and
/// `tail` was never advanced.  dequeue treated `head == tail` (one remaining
/// element) as empty, so the last element could never be popped and drain
/// loops hung.
class lockFreeQueue
{
public:
    struct Node
    {
        int data{};
        std::atomic<Node*> next{};

        Node() = default;
        // Explicit ctor: paren-init of an aggregate is C++20-only, and the
        // atomic member makes brace-copy-init awkward.
        explicit Node(int d) : data{d} {}
    };

    lockFreeQueue() = default;
    lockFreeQueue(const lockFreeQueue&) = delete;
    lockFreeQueue& operator=(const lockFreeQueue&) = delete;

    ~lockFreeQueue()
    {
        auto h = head.load();
        while (h)
        {
            Node* tmp = h;
            h = h->next.load();
            delete tmp;
        }
    }

    bool isEmpty() const {
        return head.load() == nullptr;
    }

    /// Append `data`; safe for multiple concurrent producers.
    void enqueue(int data)
    {
        auto* tmp = new Node(data);

        // First node: claim tail, then publish head.
        Node* expectedTail = nullptr;
        if (tail.compare_exchange_strong(expectedTail, tmp))
        {
            Node* expectedHead = nullptr;
            head.compare_exchange_strong(expectedHead, tmp);
            return;
        }

        for (;;)
        {
            Node* last = tail.load();
            Node* expectedNext = nullptr;
            if (last->next.compare_exchange_weak(expectedNext, tmp))
            {
                // Linked behind the real last node; swing tail forward.
                tail.compare_exchange_strong(last, tmp);
                return;
            }
            // Tail was lagging: help advance it past the linked successor.
            tail.compare_exchange_weak(last, expectedNext);
        }
    }

    /// Pop the oldest element (single consumer, no concurrent producers).
    /// @return the value, or -1 if the queue is empty (sentinel kept from
    ///         the original API — callers distinguish via isEmpty()).
    int dequeue()
    {
        Node* tmp{};
        do
        {
            tmp = head.load();
            if (!tmp)
            {
                return -1;  // empty
            }
        } while (!head.compare_exchange_weak(tmp, tmp->next.load()));

        // If we removed the last node, tail still points at it: clear it so
        // a later enqueue can restart via the first-node path.
        Node* last = tmp;
        tail.compare_exchange_strong(last, tmp->next.load());

        int data = tmp->data;
        delete tmp;
        return data;
    }

private:

    std::atomic<Node*> head{};
    std::atomic<Node*> tail{};

};

/// Stress test: 10 producer threads enqueue concurrently (released together
/// by a barrier), then the main thread drains single-threaded and checks
/// the count.
///
/// Fixes: everything after the first `return 0;` was unreachable dead code
/// (a second, never-run concurrent-dequeue phase) and has been removed;
/// "except" typo corrected to "expected"; the drain counter is plain int
/// since only the main thread touches it.
int main()
{
    lockFreeQueue queue;

    constexpr int pushCount = 10000;
    std::thread threads[10];
    std::barrier sync_point(std::size(threads));

    for (auto & t : threads)
    {
        t = std::thread([&](){
            sync_point.arrive_and_wait();  // start all producers together
            for (int j = 0; j < pushCount; ++j)
            {
                queue.enqueue(1);
            }
        });
    }

    for (auto& thread : threads)
    {
        thread.join();
    }

    // Single-threaded drain: every producer has joined, so dequeue's
    // single-consumer requirement holds.
    int count = 0;
    while (!queue.isEmpty())
    {
        queue.dequeue();
        ++count;
    }
    std::cout << "expected: " << std::size(threads) * pushCount << ",  count:" << count << std::endl;
    return 0;
}