#include <faabric/state/StateKeyValue.h>
#include <faabric/util/config.h>
#include <faabric/util/locks.h>
#include <faabric/util/logging.h>
#include <faabric/util/macros.h>
#include <faabric/util/memory.h>
#include <faabric/util/timing.h>

#include <cstring>
#include <sys/mman.h>

using namespace faabric::util;

#define ONES_BITMASK 0b11111111

namespace faabric::state {
// Construct a KV whose size is not yet known. Delegates with a zero size,
// so size-dependent operations will throw until the size is configured.
StateKeyValue::StateKeyValue(const std::string& userIn,
                             const std::string& keyIn)
  : StateKeyValue(userIn, keyIn, 0)
{
    // If using this constructor, we don't know the size, hence cannot use
    // a number of operations.
}

// Construct a KV for the given user/key. If sizeIn is non-zero the shared
// memory sizing and pull/dirty masks are set up immediately; a zero size
// defers that until the size becomes known (see two-arg constructor).
StateKeyValue::StateKeyValue(const std::string& userIn,
                             const std::string& keyIn,
                             size_t sizeIn)
  : user(userIn)
  , key(keyIn)

  , valueSize(sizeIn)
{

    if (sizeIn > 0) {
        configureSize();
    }
}

void StateKeyValue::configureSize()
{
    // Round the shared region up to a whole number of host pages; the actual
    // mapping is created lazily in reserveStorage()
    size_t nHostPages = getRequiredHostPages(valueSize);
    sharedMemSize = nHostPages * HOST_PAGE_SIZE;
    sharedMemory = nullptr;

    // One mask byte per value byte, initially all clear
    pulledMask = std::make_unique<uint8_t[]>(valueSize);
    ::memset(pulledMask.get(), 0, valueSize);

    dirtyMask = std::make_unique<uint8_t[]>(valueSize);
    zeroDirtyMask();
}

void StateKeyValue::checkSizeConfigured()
{
    // A zero size means this KV was constructed without a known size, in
    // which case size-dependent operations are not permitted
    if (valueSize > 0) {
        return;
    }

    throw StateKeyValueException(
      fmt::format("{}/{} has no size set", user, key));
}

// Unconditionally (non-lazily) fetch the full value from the remote store,
// overwriting any local copy.
void StateKeyValue::pull()
{
    SPDLOG_DEBUG("Pulling state for {}/{}", user, key);
    doPull(false);
}

bool StateKeyValue::isChunkPulled(long offset, size_t length)
{
    checkSizeConfigured();

    if (fullyPulled) {
        return true;
    }

    // TODO - more efficient way of checking this
    auto pulledMaskBytes = pulledMask.get();
    for (size_t i = 0; i < length; i++) {
        if (pulledMaskBytes[offset + i] == 0) {
            return false;
        }
    }

    return true;
}

// Copy the full value into the caller's buffer, lazily pulling it first.
// The buffer must hold at least valueSize bytes.
void StateKeyValue::get(uint8_t* buffer)
{
    doPull(true);

    // Hold a read lock for the duration of the copy
    SharedLock lock(valueMutex);
    const uint8_t* valueStart = BYTES(sharedMemory);
    std::copy(valueStart, valueStart + valueSize, buffer);
}

// Lazily pull the full value, then return a pointer to the underlying shared
// memory. NOTE: no lock is held on return; the caller is responsible for
// coordinating access to the returned region.
uint8_t* StateKeyValue::get()
{
    doPull(true);
    return BYTES(sharedMemory);
}

// Copy a chunk of the value into the caller's buffer, lazily pulling just
// that region first.
void StateKeyValue::getChunk(long offset, uint8_t* buffer, size_t length)
{
    doPullChunk(true, offset, length);

    // Read lock for the duration of the copy
    SharedLock lock(valueMutex);
    const uint8_t* chunkStart = BYTES(sharedMemory) + offset;
    std::copy(chunkStart, chunkStart + length, buffer);
}

// Lazily pull the requested region and return a pointer directly into the
// shared memory at that offset (no copy; no lock held on return).
uint8_t* StateKeyValue::getChunk(long offset, long len)
{
    doPullChunk(true, offset, len);
    return BYTES(sharedMemory) + offset;
}

std::vector<StateChunk> StateKeyValue::getAllChunks()
{
    // Divide the whole value up into chunks
    auto nChunks = uint32_t((valueSize + STATE_STREAMING_CHUNK_SIZE - 1) /
                            STATE_STREAMING_CHUNK_SIZE);

    std::vector<StateChunk> chunks;
    for (uint32_t i = 0; i < nChunks; i++) {
        uint32_t previousChunkEnd = i * STATE_STREAMING_CHUNK_SIZE;
        uint8_t* chunkStart = BYTES(sharedMemory) + previousChunkEnd;
        size_t chunkSize = std::min((size_t)STATE_STREAMING_CHUNK_SIZE,
                                    valueSize - previousChunkEnd);
        chunks.emplace_back(previousChunkEnd, chunkSize, chunkStart);
    }

    return chunks;
}

// Overwrite the entire local value with valueSize bytes from buffer and flag
// it dirty so a later push uploads it.
void StateKeyValue::set(const uint8_t* buffer)
{
    checkSizeConfigured();

    // Unique lock for setting the whole value
    FullLock lock(valueMutex);
    doSet(buffer);
    isDirty = true;
}

// Write the full value into local shared memory. Caller must hold the write
// lock.
void StateKeyValue::doSet(const uint8_t* buffer)
{
    checkSizeConfigured();

    // Ensure the backing memory exists and is writable
    allocateChunk(0, sharedMemSize);

    // Overwrite the whole value
    ::memcpy(BYTES(sharedMemory), buffer, valueSize);

    // The value is now fully present locally, so no pull is needed
    fullyPulled = true;
}

// Append bytes to this KV's remote append-only log. The local shared memory
// is not touched here.
void StateKeyValue::append(const uint8_t* buffer, size_t length)
{
    FullLock lock(valueMutex);

    appendToRemote(buffer, length);
}

// Read back nValues previously-appended entries (length total bytes) from
// the remote store into buffer.
void StateKeyValue::getAppended(uint8_t* buffer, size_t length, long nValues)
{
    SharedLock lock(valueMutex);

    pullAppendedFromRemote(buffer, length, nValues);
}

// Delete all appended entries from the remote store.
// NOTE(review): this takes a shared lock despite being a mutating operation -
// presumably acceptable because only remote state changes, but confirm.
void StateKeyValue::clearAppended()
{
    SharedLock lock(valueMutex);

    clearAppendedFromRemote();
}

// Write a chunk of the value and flag the affected bytes dirty so a later
// partial push uploads them.
void StateKeyValue::setChunk(long offset, const uint8_t* buffer, size_t length)
{
    checkSizeConfigured();

    FullLock lock(valueMutex);

    doSetChunk(offset, buffer, length);
    markDirtyChunk(offset, length);
}

// Write length bytes at offset into local shared memory, allocating the
// backing pages if needed. A null buffer only allocates, without copying.
// Caller must hold the write lock.
void StateKeyValue::doSetChunk(long offset,
                               const uint8_t* buffer,
                               size_t length)
{
    checkSizeConfigured();

    // Check we're in bounds - note that we permit chunks within the _allocated_
    // memory
    size_t chunkEnd = offset + length;
    if (chunkEnd > sharedMemSize) {
        // Log the bound actually checked against (sharedMemSize); previously
        // this printed valueSize, which made the diagnostic misleading
        SPDLOG_ERROR("Setting chunk out of bounds on {}/{} ({} > {})",
                     user,
                     key,
                     chunkEnd,
                     sharedMemSize);
        throw std::runtime_error("Attempting to set chunk out of bounds");
    }

    // If necessary, allocate the memory
    allocateChunk(offset, length);

    // Do the copy if necessary (null buffer means allocate-only)
    if (buffer != nullptr) {
        std::copy(buffer, buffer + length, BYTES(sharedMemory) + offset);
    }
}

// Mark the whole value dirty (without touching the per-byte dirty mask).
// NOTE(review): flag write under a shared (read) lock - presumably a benign
// racy flag set rather than an oversight; confirm intended.
void StateKeyValue::flagDirty()
{
    faabric::util::SharedLock lock(valueMutex);
    isDirty = true;
}

void StateKeyValue::zeroDirtyMask()
{
    checkSizeConfigured();

    // Clear every per-byte dirty flag
    std::fill_n(dirtyMask.get(), valueSize, 0);
}

// Public wrapper flagging [offset, offset + len) dirty under a lock.
// NOTE(review): mutates the dirty mask under a shared (read) lock - same
// pattern as flagDirty; confirm intended.
void StateKeyValue::flagChunkDirty(long offset, long len)
{
    checkSizeConfigured();

    faabric::util::SharedLock lock(valueMutex);
    markDirtyChunk(offset, len);
}

// Flag the whole value dirty and set the per-byte dirty mask over the given
// range. Caller is responsible for locking and bounds.
void StateKeyValue::markDirtyChunk(long offset, long len)
{
    isDirty = true;
    ::memset(dirtyMask.get() + offset, ONES_BITMASK, len);
}

// Logical size of the value in bytes (not the page-aligned mapping size).
size_t StateKeyValue::size() const
{
    return valueSize;
}

// Page-aligned size of the underlying shared memory region (>= valueSize).
size_t StateKeyValue::getSharedMemorySize() const
{
    return sharedMemSize;
}

// Map nPages of this KV's shared memory (starting pagesOffset pages in) onto
// the page-aligned address `destination`, so changes are visible through both
// mappings. Throws on misalignment or mapping failure.
void StateKeyValue::mapSharedMemory(void* destination,
                                    long pagesOffset,
                                    long nPages)
{
    checkSizeConfigured();

    PROF_START(mapSharedMem)

    if (!isPageAligned(destination)) {
        SPDLOG_ERROR("Non-aligned destination for shared mapping of {}", key);
        throw std::runtime_error("Mapping misaligned shared memory");
    }

    // Full lock to perform the shared mapping
    FullLock lock(valueMutex);

    // Ensure the underlying memory is allocated
    size_t offset = pagesOffset * faabric::util::HOST_PAGE_SIZE;
    size_t length = nPages * faabric::util::HOST_PAGE_SIZE;
    allocateChunk(offset, length);

    // Add a mapping of the relevant pages of shared memory onto the new region.
    // An old_size of 0 asks mremap to duplicate the (MAP_SHARED) mapping at
    // `destination` rather than move it, leaving the original intact.
    void* result = mremap(BYTES(sharedMemory) + offset,
                          0,
                          length,
                          MREMAP_FIXED | MREMAP_MAYMOVE,
                          destination);

    // Handle failure
    if (result == MAP_FAILED) {
        SPDLOG_ERROR("Failed mapping for {} at {} with size {}. errno: {} ({})",
                     key,
                     offset,
                     length,
                     errno,
                     strerror(errno));

        throw std::runtime_error("Failed mapping shared memory");
    }

    // Check the mapping is where we expect it to be (MREMAP_FIXED should
    // guarantee this, so a mismatch indicates something badly wrong)
    if (destination != result) {
        SPDLOG_ERROR("New mapped addr for {} doesn't match required {} != {}",
                     key,
                     destination,
                     result);
        throw std::runtime_error("Misaligned shared memory mapping");
    }

    PROF_END(mapSharedMem)
}

// Remove a mapping previously created by mapSharedMemory so the address range
// can be reused. Throws on misalignment or munmap failure.
// NOTE(review): this unmaps sharedMemSize bytes even though mapSharedMemory
// may have mapped fewer pages - confirm callers always map the full region.
void StateKeyValue::unmapSharedMemory(void* mappedAddr)
{
    FullLock lock(valueMutex);

    if (!isPageAligned(mappedAddr)) {
        SPDLOG_ERROR("Attempting to unmap non-page-aligned memory at {} for {}",
                     mappedAddr,
                     key);
        throw std::runtime_error("Unmapping misaligned shared memory");
    }

    // Unmap the current memory so it can be reused
    int result = munmap(mappedAddr, sharedMemSize);
    if (result == -1) {
        SPDLOG_ERROR(
          "Failed to unmap shared memory at {} with size {}. errno: {}",
          mappedAddr,
          sharedMemSize,
          errno);

        throw std::runtime_error("Failed unmapping shared memory");
    }
}

// Ensure the pages backing [offset, offset + length) are reserved and
// writable. The region is reserved PROT_NONE up front and pages are made
// writable on demand here. Caller must hold the write lock.
void StateKeyValue::allocateChunk(long offset, size_t length)
{
    // Can skip if the whole thing is already allocated
    if (fullyAllocated) {
        return;
    }

    // Ensure storage is reserved
    reserveStorage();

    // Page-align the chunk
    AlignedChunk chunk = getPageAlignedChunk(offset, length);

    // Make sure all the pages involved are writable
    // NOTE(review): PROT_WRITE without PROT_READ relies on platforms where
    // write access implies read access - confirm this is intended.
    int res = mprotect(
      BYTES(sharedMemory) + chunk.nBytesOffset, chunk.nBytesLength, PROT_WRITE);
    if (res != 0) {
        // NOTE(review): this failure is fatal (we throw) but is only logged
        // at debug level - consider SPDLOG_ERROR.
        SPDLOG_DEBUG("Allocating memory for {}/{} of size {} failed: {} ({})",
                     user,
                     key,
                     length,
                     errno,
                     strerror(errno));
        throw std::runtime_error("Failed allocating memory for KV");
    }

    // Flag if we've now allocated the whole value
    if (offset == 0 && length == sharedMemSize) {
        fullyAllocated = true;
    }
}

// Lazily reserve the page-aligned shared memory region for this KV. The
// region is mapped PROT_NONE (reserved address space only); allocateChunk
// later makes the needed pages writable. Idempotent.
void StateKeyValue::reserveStorage()
{
    checkSizeConfigured();

    // Check if already reserved
    if (sharedMemory != nullptr) {
        return;
    }

    PROF_START(reserveStorage)

    if (sharedMemSize == 0) {
        throw StateKeyValueException("Reserving storage with no size for " +
                                     key);
    }

    // Create shared memory region with no permissions. MAP_SHARED is required
    // so mapSharedMemory can later duplicate it with mremap.
    sharedMemory = mmap(
      nullptr, sharedMemSize, PROT_NONE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    if (sharedMemory == MAP_FAILED) {
        SPDLOG_DEBUG("Mmapping of storage size {} failed. errno: {}",
                     sharedMemSize,
                     errno);

        throw std::runtime_error("Failed mapping memory for KV");
    }

    SPDLOG_DEBUG("Reserved {} pages of shared storage for {}",
                 sharedMemSize / HOST_PAGE_SIZE,
                 key);

    PROF_END(reserveStorage)
}

// Push only the bytes flagged in the supplied mask KV, whose value acts as a
// per-byte dirty mask and must therefore be exactly the same size as this KV.
void StateKeyValue::pushPartialMask(
  const std::shared_ptr<StateKeyValue>& maskKv)
{
    // Reject masks that don't cover the value byte-for-byte
    if (maskKv->valueSize != valueSize) {
        std::string msg =
          "Different sizes: mask=" + std::to_string(maskKv->valueSize) +
          " and value=" + std::to_string(valueSize);

        throw StateKeyValueException(msg);
    }

    doPushPartial(maskKv->get());
}

// Push only the chunks flagged in this KV's own per-byte dirty mask.
void StateKeyValue::pushPartial()
{
    checkSizeConfigured();

    auto dirtyMaskBytes = dirtyMask.get();
    doPushPartial(dirtyMaskBytes);
}

// Push the whole value to the remote store if (and only if) it is dirty,
// then clear the dirty state. Uses a check / lock / re-check pattern so the
// common clean case avoids taking the full lock.
void StateKeyValue::pushFull()
{
    checkSizeConfigured();

    // Ignore if not dirty (unlocked fast path)
    if (!isDirty) {
        return;
    }

    // Get full lock for complete push
    faabric::util::FullLock fullLock(valueMutex);

    // Double check condition now we hold the lock
    if (!isDirty) {
        return;
    }

    pushToRemote();

    // Remove any dirty flags
    isDirty = false;
    zeroDirtyMask();
}

// Pull the full value from the remote store. When lazy, skips the pull if the
// value has already been fully pulled. Uses a shared-lock check followed by a
// full-lock re-check so concurrent lazy pulls don't serialize needlessly.
void StateKeyValue::doPull(bool lazy)
{
    checkSizeConfigured();

    // Drop out if we already have the data and we don't care about updating
    {
        faabric::util::SharedLock lock(valueMutex);
        if (lazy && fullyPulled) {
            return;
        }
    }

    // Unique lock on the whole value (the shared lock above has been released,
    // so another thread may have pulled in the meantime)
    faabric::util::FullLock lock(valueMutex);

    // Check again if we need to do this
    if (lazy && fullyPulled) {
        return;
    }

    // Make sure storage is allocated
    allocateChunk(0, sharedMemSize);

    // Do the pull
    pullFromRemote();
    fullyPulled = true;
}

// Pull one chunk of the value from the remote store, bounds-checked against
// the logical value size. When lazy, skips the pull if the chunk was already
// pulled. Same shared-then-full lock pattern as doPull.
void StateKeyValue::doPullChunk(bool lazy, long offset, size_t length)
{
    checkSizeConfigured();

    // Check bounds (against valueSize here, unlike doSetChunk which permits
    // anything inside the allocated region)
    size_t chunkEnd = offset + length;
    if (chunkEnd > valueSize) {
        SPDLOG_ERROR("Pulling chunk out of bounds on {}/{} ({} > {})",
                     user,
                     key,
                     chunkEnd,
                     valueSize);
        throw std::runtime_error("Out of bounds chunk");
    }

    // Drop out if we already have the data and we don't care about updating
    {
        faabric::util::SharedLock lock(valueMutex);
        if (lazy && isChunkPulled(offset, length)) {
            return;
        }
    }

    // Unique lock
    faabric::util::FullLock lock(valueMutex);

    // Check condition again (another thread may have pulled it while we were
    // between locks)
    if (lazy && isChunkPulled(offset, length)) {
        return;
    }

    // Allocate the required memory
    allocateChunk(offset, length);

    // Pull from remote
    pullChunkFromRemote(offset, length);

    // Mark the chunk as pulled
    ::memset(pulledMask.get() + offset, ONES_BITMASK, length);
}

// Push only the dirty chunks indicated by dirtyMaskBytes (one mask byte per
// value byte), then clear the mask and dirty flag. Skips entirely when the
// value is clean.
void StateKeyValue::doPushPartial(const uint8_t* dirtyMaskBytes)
{
    // Ignore if not dirty (unlocked fast path)
    if (!isDirty) {
        return;
    }

    // We need a full lock while doing this, mainly to ensure no other threads
    // start the same process
    faabric::util::FullLock lock(valueMutex);

    // Double check condition
    if (!isDirty) {
        SPDLOG_DEBUG("No need for partial push on {}", key);
        return;
    }

    // Work out what's dirty
    const std::vector<StateChunk>& chunks = getDirtyChunks(dirtyMaskBytes);

    // Zero the mask now that we're finished with it
    // NOTE(review): the C-style cast strips const from the caller's mask and
    // mutates it in place - confirm callers expect their mask to be cleared.
    ::memset((void*)dirtyMaskBytes, 0, valueSize);

    // Push
    pushPartialToRemote(chunks);

    // Update if necessary
    // NOTE(review): re-pulling only when fullyAllocated looks intended to
    // refresh the local copy with other hosts' concurrent writes - confirm.
    if (fullyAllocated) {
        pullFromRemote();
    }

    // Mark as no longer dirty
    isDirty = false;
}

// Acquire a remote Redis lock on the given key, retrying with a fixed sleep
// between attempts. Returns the non-zero lock id on success, or 0 if the
// retry budget is exhausted.
uint32_t StateKeyValue::waitOnRedisRemoteLock(const std::string& redisKey)
{
    PROF_START(remoteLock)

    redis::Redis& redis = redis::Redis::getState();

    // First attempt, then retry while we keep getting back the "not acquired"
    // sentinel (a lock id of zero)
    uint32_t lockId = redis.acquireLock(redisKey, REMOTE_LOCK_TIMEOUT_SECS);

    for (unsigned int attempt = 0; lockId == 0; attempt++) {
        SPDLOG_DEBUG(
          "Waiting on remote lock for {} (loop {})", redisKey, attempt);

        if (attempt >= REMOTE_LOCK_MAX_RETRIES) {
            SPDLOG_ERROR("Timed out waiting for lock on {}", redisKey);
            break;
        }

        SLEEP_MS(500);
        lockId = redis.acquireLock(redisKey, REMOTE_LOCK_TIMEOUT_SECS);
    }

    PROF_END(remoteLock)
    return lockId;
}

// Explicit shared (read) lock for callers that need the value held stable
// across several operations. Must be paired with unlockRead.
void StateKeyValue::lockRead()
{
    valueMutex.lock_shared();
}

void StateKeyValue::unlockRead()
{
    valueMutex.unlock_shared();
}

// Explicit exclusive (write) lock. Must be paired with unlockWrite.
void StateKeyValue::lockWrite()
{
    valueMutex.lock();
}

void StateKeyValue::unlockWrite()
{
    valueMutex.unlock();
}

std::vector<StateChunk> StateKeyValue::getDirtyChunks(
  const uint8_t* dirtyMaskBytes)
{
    std::vector<StateChunk> chunks;

    auto sharedMemoryBytes = BYTES(sharedMemory);
    long startIdx = 0;
    bool isOn = false;

    // Iterate through looking for dirty chunks
    for (size_t i = 0; i < valueSize; i++) {
        if (dirtyMaskBytes[i] == 0) {
            // If we encounter an "off" mask and we're "on", switch off and
            // write the chunk
            if (isOn) {
                isOn = false;

                // Record the chunk
                unsigned long length = i - startIdx;
                chunks.emplace_back(
                  startIdx, length, sharedMemoryBytes + startIdx);
            }
        } else {
            if (!isOn) {
                isOn = true;
                startIdx = i;
            }
        }
    }

    // Add the final chunk if necessary
    if (isOn) {
        unsigned long length = valueSize - startIdx;
        chunks.emplace_back(startIdx, length, sharedMemoryBytes + startIdx);
    }

    return chunks;
}
}
