#ifndef __CQUEUE_H__
#define __CQUEUE_H__

#include <cstdint> // int64_t
#include <mutex>
#include <thread>
#include <utility> // std::move
#if defined(__x86_64__)
#define u64 long
#else
#define u64 long long
#endif

// One slot of the ring buffer: the payload plus a flag marking whether
// the slot currently holds unread data.
template <typename T>
struct MyData
{
    int status; // 0: empty slot, 1: slot holds unread data
    T data;
    MyData()
    {
        status = 0;
    }
};

// Ring-buffer queue with two operating modes (chosen at construction):
//  - single-writer / single-reader: pushes do not take the mutex except
//    while the buffer is being grown; reads always take the mutex.
//  - multi-writer / multi-reader: every push and pop takes the mutex.
template <typename T>
class RingBuffer
{
public:
    /* _size:            initial capacity; 0 (or negative) selects the
                         default of 1024. The buffer grows automatically.
       _is_multi_thread: true  -> many writers / many readers (fully locked)
                         false -> one writer / one reader (lock only on grow)
    */
    RingBuffer(int64_t _size, bool _is_multi_thread)
        : size(_size), r_pos(0), w_pos(0),
          is_multi_thread(_is_multi_thread), mutex_lock()
    {
        if (_size <= 0) // also guards against a negative request
            size = 1024;
        this->data = new MyData<T>[size];
    }
    RingBuffer() : size(8), r_pos(0), w_pos(0), is_multi_thread(false), mutex_lock()
    {
        this->data = new MyData<T>[size];
    }
    // The buffer owns raw storage; copying would double-free it.
    RingBuffer(const RingBuffer &) = delete;
    RingBuffer &operator=(const RingBuffer &) = delete;
    ~RingBuffer()
    {
        delete[] data;
    }

    // Enqueue one element, growing the buffer when it is full.
    // Returns 0 on success; the only failure mode is std::bad_alloc
    // thrown by the grow step (operator new[] throws, it never returns null).
    int push_task(T node)
    {
        if (is_multi_thread)
            mutex_lock.lock();

        if (w_pos == r_pos && data[w_pos].status == 1) // buffer is full
        {
            // In single-writer/single-reader mode the reader still takes the
            // mutex, so growing (which swaps the storage out from under the
            // reader) must be serialized against it.  NOTE: the original code
            // unlocked unconditionally after every push even when it had never
            // locked — undefined behaviour; the pair is now balanced.
            if (!is_multi_thread)
                mutex_lock.lock();
            grow();
            if (!is_multi_thread)
                mutex_lock.unlock();
        }

        data[w_pos].status = 1;
        data[w_pos].data = std::move(node);
        if (++w_pos == size)
            w_pos = 0;

        if (is_multi_thread)
            mutex_lock.unlock();
        return 0;
    }

    // Peek at the oldest element without consuming it; pair with done_task()
    // to release the slot.  Returns 0 and fills out_data, or -1 if empty.
    int get_task(T &out_data)
    {
        std::lock_guard<std::mutex> lock(mutex_lock);
        if (data[r_pos].status != 1)
            return -1;
        out_data = data[r_pos].data;
        return 0;
    }

    // Pop the oldest element.  Returns 0 and fills out_data, or -1 if empty.
    int get_task2(T &out_data)
    {
        std::lock_guard<std::mutex> lock(mutex_lock);
        if (data[r_pos].status != 1)
            return -1;
        out_data = data[r_pos].data;
        data[r_pos].status = 0;
        if (++r_pos == size)
            r_pos = 0;
        return 0;
    }

    // Release the slot last returned by get_task().  Intentionally unlocked:
    // it is only meaningful in single-reader mode, where exactly one thread
    // calls get_task()/done_task().
    void done_task()
    {
        data[r_pos].status = 0;
        r_pos++;
        if (r_pos >= size)
        {
            r_pos = 0;
        }
    }

    // Record an externally-tracked "pending" count (bookkeeping only;
    // does not affect the queue contents).
    void set_nonread(int count)
    {
        non_read = count;
    }

    // Number of unread elements currently stored.
    int64_t get_size()
    {
        std::lock_guard<std::mutex> lock(mutex_lock);
        if (r_pos == w_pos)
            // Positions coincide both when empty and when full; the slot
            // status disambiguates.  (The original reported a full buffer
            // as size 0.)
            return data[r_pos].status == 1 ? size : 0;
        if (r_pos > w_pos)
            return size - r_pos + w_pos;
        return w_pos - r_pos;
    }

    int get_nonread()
    {
        return non_read;
    }

private:
    // Double the capacity, compacting live elements so the oldest lands at
    // index 0.  Only called when the buffer is full; the caller must hold
    // mutex_lock (or otherwise guarantee exclusive access).
    void grow()
    {
        MyData<T> *tmp = new MyData<T>[2 * size]; // throws std::bad_alloc on failure
        int64_t n = 0;
        for (int64_t i = r_pos; i < size; i++, n++)
        {
            tmp[n].data = std::move(data[i].data);
            tmp[n].status = 1;
        }
        for (int64_t i = 0; i < r_pos; i++, n++)
        {
            tmp[n].data = std::move(data[i].data);
            tmp[n].status = 1;
        }
        delete[] data;
        data = tmp;
        r_pos = 0;
        w_pos = size; // old capacity == number of live elements
        size *= 2;
    }

    int64_t size;         // current capacity
    int64_t r_pos;        // next slot to read
    int64_t w_pos;        // next slot to write
    MyData<T> *data;      // owned storage, length `size`
    std::mutex mutex_lock;
    bool is_multi_thread; // true: fully-locked M:N mode
    int non_read = 0;     // external bookkeeping counter (see set_nonread)
};
#endif
