#pragma once

#include <atomic>
#include <memory>
#include <vector>

#include <glog/logging.h>
#include <gflags/gflags.h>

namespace hawking {
namespace indexlib {

// Bytes reserved per slot; SimpleRecycleArray::Get() scales slot offsets by
// this flag to locate data inside array_. Defined (DEFINE_int32) in a .cpp.
DECLARE_int32(recycle_array_slot_bytes);

// Per-slot reader reference count plus the byte length stored in the slot.
// IncrRef/DecrRef are called by reader threads; Ref() is polled by the single
// writer to decide whether a slot may be recycled.
class OffsetSlot {
public:
    OffsetSlot() = default;

    // Pin this slot before reading its data.
    void IncrRef() {
        ref_.fetch_add(1, std::memory_order_acq_rel);
    }

    // Unpin this slot after the data has been consumed.
    void DecrRef() {
        ref_.fetch_sub(1, std::memory_order_acq_rel);
    }

    // Byte length of the data currently recorded for this slot.
    size_t Length() const {
        return length_;
    }

    void SetLength(size_t length) {
        length_ = length;
    }

    // Current reader count. memory_order_acq_rel is invalid (UB) for a pure
    // load; acquire is the correct ordering here.
    int32_t Ref() const {
        return ref_.load(std::memory_order_acquire);
    }

private:
    // size_t to match Length()/SetLength() — avoids silent narrowing.
    size_t length_ = 0;
    // Must be brace-initialized: a default-constructed std::atomic holds an
    // indeterminate value before C++20.
    std::atomic<int32_t> ref_{0};
};

/*
    this is every disk-segment-doc's one attribute's update component.
    high-level code constructs it only when one disk-segment-doc's one attribute receives an update;
    no update, no need for it.

    in default, every doc has a 1K(256 slots, 4 bytes per slot) update-memory per attribute.
    the limit could setup it by construct-function. slots_ is unchanged after init.

    update-codes should use `Insert` to insert the update-data to here,
    and record the offset in offset-file,
    recommend new offset in offset-file = datafile-size + offset-here that easy to diff.
    here would insert the data at a suitable location in array_,
    and update the new offset to cur_offset_idx_, and return it to external codes for record.

    if one disk-segment-doc's one attribute is updated consecutively and frequently,
    and the read qps is also high and slowly processed, so readers still hold refs to old updates,
    Insert may not find suitable room and would return -1.

    old updates can be recycled once no readers reference them;
    external code should use `Get` to incr the ref and get the updated data,
    and use `Release` to decr the ref after it finishes using the data.

    a new data-update never reuses the current update's slots, no matter what their ref count is;
    a new data-update fails when its candidate slots have a Ref or overlap the current update's slots.

    the typical order is as three-buffer-switch:
    1. single update thread update the data and return a offset;
    2. disk attribute index receive the offset and record it with `datafile-size + offset` to offset-file
    3. then read threads maybe do `Get` when use and do `Release` after use
    4. same as 1
    5. same as 2, the new update-data this time would never overlap the current slots; if there is not enough room, the update fails
    6. read threads may get 1st-update's offset, may get 2nd-update's offset, all of them could work
    7. same as 1
    8. same as 2, if no Ref with 1st-update, could overlap here, if still has Ref, update fail.

    so external codes should call `Release` after the data had been copied to high-level codes, that would not use.

    this class maintain the newest offset(cur_offset_idx_) from doc's attribute-update only itself,
    and single-thread update mechanism, so without ABA problem.

    the unittest show a scene that,
    every doc's data's random update, and multi reader traverse all docs, offset is read and write in a mmap file.

    compare to hold a bigger array for all docs, this would avoid a core problem that,
    some old-update-data maybe without Ref, but may be used while the new update may overlap their slots,
    because in external code it is hard to update the offset precisely while lock-free,
    that time window is hard to keep without triggering problems.

    SimpleRecycleArray's big negative: if array_ size is small, and one update is larger than 1/3 of array_'s size,
    and at about middle location in array_, would lead could not update forever.
    so you'd better not make your update-data bigger than 1/3 of array_'s size.

    to external codes, if a great deal of docs has been updated by using SimpleRecycleArray, should start merge.
*/
// Fixed-size recycle buffer for one doc-attribute's updates.
// Single writer calls Insert; multiple readers pair Get/Release.
// See the long design comment above for the full protocol.
class SimpleRecycleArray {
public:
    // slot_num/slot_bytes of 0 fall back to defaults inside Init_ (see .cpp).
    explicit SimpleRecycleArray(int32_t slot_num = 0, int32_t slot_bytes = 0) {
        Init_(slot_num, slot_bytes);
    }

    // Copies new_length bytes of data into a free, unreferenced span of
    // slots and returns the starting slot index for the caller to record
    // in the offset file; returns failure when no suitable room exists.
    // Defined out-of-line (single update thread only).
    size_t Insert(int8_t* data, size_t new_length);

    // Pin the slot (IncrRef) and return a pointer to its data inside array_.
    // Every Get MUST be paired with exactly one Release on the same index,
    // otherwise the slot can never be recycled.
    int8_t* Get(size_t offset_idx) {
        slots_[offset_idx]->IncrRef();
        return array_.data() + offset_idx * FLAGS_recycle_array_slot_bytes;
    }

    // Unpin the slot after the data obtained via Get has been consumed.
    void Release(size_t offset_idx) {
        slots_[offset_idx]->DecrRef();
    }

private:
    void Init_(int32_t slot_num, int32_t slot_bytes);
    // NOTE(review): keeps the historical "Currnet" spelling because the
    // out-of-line code in the .cpp may reference it; rename both together.
    size_t CurrnetOffset_() const {
        return cur_offset_idx_.load(std::memory_order_acquire);
    }
    bool IsOverlap_(
        size_t cur_offset_idx, size_t cur_slot_cost,
        size_t new_offset_idx, size_t new_slot_cost) const;
    bool EnoughToEnd_(
        size_t cur_offset_idx, size_t cur_slot_used, size_t new_slot_used) const;

    // Backing storage; sized once by Init_ and never resized afterwards.
    std::vector<int8_t> array_;
    // Slot index of the most recent update. Brace-initialized: a
    // default-constructed std::atomic is indeterminate before C++20.
    std::atomic<size_t> cur_offset_idx_{0};
    // One ref-counted slot descriptor per slot; fixed after Init_.
    std::vector<std::unique_ptr<OffsetSlot>> slots_;
    bool has_inserted_ = false;
};

}
}