#include "util/memory/array/simple_recycle_array.h"

namespace hawking {
namespace indexlib {

// Runtime-tunable defaults for the recycle array geometry. Init_ falls back
// to these only when the caller passes 0 for the corresponding parameter.
// NOTE(review): other methods in this file read these flags directly rather
// than the values Init_ was actually given — confirm callers never pass
// explicit Init_ arguments that differ from the flags.
DEFINE_int32(recycle_array_slot_bytes, 4, "recycle array slot bytes");
DEFINE_int32(recycle_array_slot_num, 256, "recycle array slots");
DEFINE_bool(recycle_array_slot_debug, false, "debug switch");

void SimpleRecycleArray::Init_(int32_t slot_num, int32_t slot_bytes) {
    if (!slot_num) {
        slot_num = FLAGS_recycle_array_slot_num;
    }
    if (!slot_bytes) {
        slot_bytes = FLAGS_recycle_array_slot_bytes;
    }

    slots_.reserve(slot_num);
    for (size_t idx = 0; idx < slot_num; ++idx) {
        std::unique_ptr<OffsetSlot> offset_slot = std::make_unique<OffsetSlot>();
        slots_.push_back(std::move(offset_slot));
    }
    array_.resize(slot_num * slot_bytes);
    cur_offset_idx_.store(0, std::memory_order_relaxed);
}

bool SimpleRecycleArray::EnoughToEnd_(
    size_t cur_offset_idx, size_t cur_slot_used, size_t new_slot_used) const {
    return (FLAGS_recycle_array_slot_num - cur_offset_idx - cur_slot_used) >= new_slot_used;
}

// Closed-interval overlap test between the slot range still holding the
// current data ([cur_offset_idx, cur_offset_idx + cur_slot_cost - 1]) and the
// candidate range for the new insert.
bool SimpleRecycleArray::IsOverlap_(
    size_t cur_offset_idx, size_t cur_slot_cost,
    size_t new_offset_idx, size_t new_slot_cost) const {
    const size_t cur_end = cur_offset_idx + cur_slot_cost - 1;
    const size_t new_end = new_offset_idx + new_slot_cost - 1;
    // Two closed intervals intersect iff each starts no later than the other
    // ends (De Morgan of the original "fully before or fully after" test).
    return new_offset_idx <= cur_end && cur_offset_idx <= new_end;
}

size_t SimpleRecycleArray::Insert(int8_t* data, size_t new_length) {
    // 1. first exlucde the case that `must exceed limit`
    if (new_length > array_.size()) {
        LOG(ERROR) << "Could not insert size "
            << new_length << " larger than all size " << array_.size();
        return -1;
    }

    // only one thread here modify the cur_offset_idx_
    size_t cur_offset_idx = CurrnetOffset_();
    // precise bytes cost
    size_t cur_length = slots_[cur_offset_idx]->Length();
    // calc how many slots be used for cur data
    size_t cur_slot_used = (cur_length / FLAGS_recycle_array_slot_bytes) +
        ((cur_length % FLAGS_recycle_array_slot_bytes) ? 1 : 0);
    // calc how many slots is needed for new data
    size_t slot_used = (new_length / FLAGS_recycle_array_slot_bytes) +
        ((new_length % FLAGS_recycle_array_slot_bytes) ? 1 : 0);
    
    if (FLAGS_recycle_array_slot_debug) {
        LOG(INFO) << "current is from slot " << cur_offset_idx
            << " to " << (cur_offset_idx + cur_slot_used - 1)
            << ", this time would cost " << slot_used << " slots with length "
            << new_length;
    }

    // 2. then exlucde the case that `exceed limit of relate memory`
    size_t relate_length = array_.size() - cur_slot_used * FLAGS_recycle_array_slot_bytes;
    if (new_length > relate_length) {
        LOG(ERROR) << "Could not insert size "
            << new_length << " larger than relate size " << relate_length;
        return -1;
    }
    
    // waste the memory to end when could not room new_length
    // because of avoid save the data with two unconsecutive memory
    bool enough_to_end = EnoughToEnd_(cur_offset_idx, cur_slot_used, slot_used);

    // 3. then exclude the case that `exceed limit of relate memory consider consecutive`
    if (!enough_to_end) {
        size_t waste =
            (FLAGS_recycle_array_slot_num - cur_offset_idx - cur_slot_used) *
                FLAGS_recycle_array_slot_bytes;
        relate_length -= waste;
        if (new_length > relate_length) {
            LOG(ERROR) << "Could not insert size "
                << new_length
                << " larger than relate size consider consecutive "
                << relate_length;
            return -1;
        }
    }

    // 4. make sure the new offset by consider consecutive
    size_t new_offset_idx = enough_to_end ? (cur_offset_idx + cur_slot_used) : 0;

    // 5. avoid overlap the still using slot or overlap current-update range
    if (has_inserted_ &&
        IsOverlap_(cur_offset_idx, cur_slot_used, new_offset_idx, slot_used)) {
        LOG(ERROR) << "Could not insert size "
                << new_length
                << " by overlap: current start " << cur_offset_idx
                << " and current cost slots " << cur_slot_used
                << " new start " << new_offset_idx
                << " and new cost slots " << slot_used;
        return -1;
    }
    for (size_t idx = 0; idx < slot_used; ++idx) {
        if (slots_[new_offset_idx + idx]->Ref()) {
            LOG(ERROR) << "Could not insert size "
                << new_length
                << " by could not found enough to room from still using slot "
                << " new_offset_idx " << (new_offset_idx + idx);
            return -1;
        }
    }

    if (FLAGS_recycle_array_slot_debug) {
        LOG(INFO) << "new update is from slot " << new_offset_idx
            << " to " << (new_offset_idx + slot_used - 1)
            << ", cost " << slot_used << " slots with length "
            << new_length;
    }

    // 6. copy data
    size_t new_length_copy = new_length;
    for (size_t idx = 0; idx < slot_used; ++idx) {
        int8_t* addr =
            array_.data() + (new_offset_idx + idx) * FLAGS_recycle_array_slot_bytes;
        size_t w_len =
            std::min(new_length_copy, static_cast<size_t>(FLAGS_recycle_array_slot_bytes));
        std::memcpy(addr, data, w_len);
        data += w_len;
        new_length_copy -= w_len;
    }

    slots_[new_offset_idx]->SetLength(new_length);

    // 7. atomic update offset
    // external code is recommended to update the offset to offset-file or somewhere by Get,
    // so they could diff the data is in datafile or here.
    cur_offset_idx_.store(new_offset_idx, std::memory_order_release);
    has_inserted_ = true;
    return new_offset_idx;
}

}
}